/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/* read-side critical section while a */
				/* preemptible-RCU grace period is in */
				/* progress must wait for a later grace */
				/* period.  This pointer points to the */
				/* ->next pointer of the last task that */
				/* must wait for a later grace period, or */
				/* to &->rcb.rcucblist if there is no */
				/* such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/* section.  Tasks are placed at the head */
				/* of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/* current grace period, or NULL if there */
				/* is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to first task blocking the */
				/* current expedited grace period, or NULL */
				/* if there is no such task.  If there */
				/* is no current expedited grace period, */
				/* then there cannot be any such task. */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/* If all three are equal, RCU is idle. */
};

static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
};
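
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * file): the single callback list is segmented by three tail pointers,
 * each of type struct rcu_head **.  Callbacks before *donetail have
 * waited out a full grace period and may be invoked; callbacks between
 * *donetail and *curtail wait on the current grace period; callbacks
 * between *curtail and *nexttail must wait for a later grace period:
 *
 *	rcucblist -> [done CBs] -> [current-GP CBs] -> [next-GP CBs]
 *	                       ^                   ^                ^
 *	                   donetail            curtail          nexttail
 *
 * When the current grace period ends, rcu_preempt_cpu_qs() promotes
 * whole segments at once by advancing donetail to curtail and curtail
 * to nexttail.
 */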

static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_cur_gp(void)
{
	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time.  ;-)
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}

/*
 * Check for preempted RCU readers blocking any grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_any(void)
{
	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}

/*
 * Check for preempted RCU readers blocking the current grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
	return rcu_preempt_ctrlblk.gp_tasks != NULL;
}

/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}

/*
 * Return true if a preemptible-RCU grace period is in progress.
 * The caller must disable hardirqs.
 */
static int rcu_preempt_gp_in_progress(void)
{
	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/*
	 * If there is no GP, or if blocked readers are still blocking GP,
	 * then there is nothing more to do.
	 */
	if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
		return;

	/* Advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, make RCU_SOFTIRQ process them. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		raise_softirq(RCU_SOFTIRQ);
}

/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise we
 * enqueue it before the element referenced by ->gp_tasks (or at the
 * tail if ->gp_tasks is NULL) and point ->gp_tasks at the newly added
 * element.  The task will dequeue itself when it exits the outermost
 * enclosing RCU read-side critical section.  Therefore, the current
 * grace period cannot be permitted to complete until the ->gp_tasks
 * pointer becomes NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}
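
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * file): readers are queued at the head of ->blkd_tasks and age toward
 * the tail, so the tasks from ->gp_tasks through the tail are exactly
 * those blocking the current grace period:
 *
 *	blkd_tasks -> T4 -> T3 -> T2 -> T1
 *	   (newest)          ^        (oldest)
 *	                  gp_tasks
 *
 * Here T3, T2, and T1 blocked before this CPU passed through a
 * quiescent state, so the current grace period cannot end until all
 * three exit their read-side critical sections; T4 blocks only some
 * later grace period.
 */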

/*
 * Tiny-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = t->rcu_node_entry.next;
		if (np == &rcu_preempt_ctrlblk.blkd_tasks)
			np = NULL;
		list_del(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
		INIT_LIST_HEAD(&t->rcu_node_entry);

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
	--t->rcu_read_lock_nesting;
	barrier();  /* decrement before load of ->rcu_read_unlock_special */
	if (t->rcu_read_lock_nesting == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
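
/*
 * Illustrative reader-side usage of the above primitives (an editorial
 * addition, not part of the original file; "struct foo" and "gbl_foo"
 * are hypothetical):
 *
 *	struct foo {
 *		int a;
 *	};
 *	struct foo __rcu *gbl_foo;
 *
 *	int read_foo_a(void)
 *	{
 *		int a;
 *
 *		rcu_read_lock();	(__rcu_read_lock() above)
 *		a = rcu_dereference(gbl_foo)->a;
 *		rcu_read_unlock();	(__rcu_read_unlock() above)
 *		return a;
 *	}
 *
 * Being preempted between rcu_read_lock() and rcu_read_unlock() is
 * legal here: the reader is then queued on ->blkd_tasks by
 * rcu_preempt_note_context_switch() and dequeues itself in
 * rcu_read_unlock_special().
 */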

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (!rcu_preempt_running_reader() && rcu_preempt_gp_in_progress())
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		raise_softirq(RCU_SOFTIRQ);
	if (rcu_preempt_gp_in_progress() && rcu_preempt_running_reader())
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from __rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	rcu_preempt_start_gp();  /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
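
/*
 * Illustrative call_rcu() usage (an editorial addition, not part of
 * the original file; "struct foo" is hypothetical).  The callback
 * recovers the enclosing structure with container_of() and frees it:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	void foo_release(struct foo *fp)
 *	{
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 *
 * The rcu_head is appended at *nexttail above, so foo_reclaim() cannot
 * run until at least one full grace period has elapsed.
 */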

/*
 * Wait for all previously queued RCU callbacks to be invoked.
 */
void rcu_barrier(void)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
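
/*
 * Illustrative updater-side usage of synchronize_rcu() (an editorial
 * addition, not part of the original file; "struct foo", "foo_lock",
 * and the ->list field are hypothetical):
 *
 *	void foo_del(struct foo *fp)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&fp->list);
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();	(wait for pre-existing readers)
 *		kfree(fp);
 *	}
 *
 * On this single-CPU implementation, if no reader is currently
 * preempted within its critical section, the
 * rcu_preempt_blocked_readers_any() fastpath above returns without
 * queueing a callback at all.
 */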

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(void)
{
	return rcu_preempt_ctrlblk.exp_tasks != NULL;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;
	local_irq_restore(flags);

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (rcu_preempted_readers_exp())
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
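
/*
 * Sketch of the counter-snapshot check above (an editorial addition;
 * assumes the rcupdate.h definition
 * ULONG_CMP_LT(a, b) == (ULONG_MAX / 2 < (a) - (b)),
 * a wraparound-safe "a precedes b" comparison on unsigned longs):
 *
 *	snap = sync_rcu_preempt_exp_count + 1;
 *	mutex_lock(&sync_rcu_preempt_exp_mutex);
 *	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
 *		(at least one full expedited grace period completed
 *		 while we waited for the mutex, covering us as well)
 *
 * The unsigned subtraction keeps the comparison correct even after
 * the counter wraps, for example snap == ULONG_MAX against a wrapped
 * counter value of 2.
 */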

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
	if (!rcu_preempt_running_reader())
		rcu_preempt_cpu_qs();
	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, and clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}

#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */