rcu: Add WARN_ON_ONCE() consistency checks covering state transitions
kernel/rcutree_plugin.h
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

/*
 * Tell them what RCU they are running.
 */
static inline void rcu_bootup_announce(void)
{
	printk(KERN_INFO
	       "Experimental preemptable hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this does not necessarily mean that the task currently running
 * on the CPU is in a quiescent state:  it just means that the current
 * grace period need not wait on this CPU itself.  There might be any
 * number of tasks blocked while in an RCU read-side critical section.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
	rdp->passed_quiesc_completed = rdp->completed;
	barrier();
	rdp->passed_quiesc = 1;
}
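
/*
 * A note on the ordering above (the consumer side lives in rcutree.c,
 * not shown here): the barrier() keeps the compiler from reordering
 * the two stores, so ->passed_quiesc_completed holds the value of
 * ->completed as of the quiescent state before ->passed_quiesc becomes
 * visible to the RCU core on this CPU.  That way a quiescent state is
 * credited to the grace period in which it actually occurred rather
 * than to a later one.
 */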

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the appropriate entry
 * of the blocked_tasks[] array.  The task will dequeue itself when
 * it exits the outermost enclosing RCU read-side critical section.
 * Therefore, the current grace period cannot be permitted to complete
 * until the blocked_tasks[] entry indexed by the low-order bit of
 * rnp->gpnum empties.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	int phase;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = rcu_preempt_state.rda[cpu];
		rnp = rdp->mynode;
		spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
		smp_mb();  /* Ensure later ctxt swtch seen after above. */
		spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_qs(cpu);
	t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
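
/*
 * Worked example of the queue choice above (illustrative only):
 * suppose rnp->gpnum is even, so the current grace period's blocked
 * readers live on blocked_tasks[0].  If this CPU's bit is still set
 * in rnp->qsmask (the CPU has not yet checked in), then
 * !(rnp->qsmask & rdp->grpmask) == 0 and phase == 0, so the task is
 * queued where it blocks the current grace period.  If the CPU has
 * already checked in, phase == 1 and the task only holds up the next
 * grace period, exactly as the comment above describes.
 */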

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	ACCESS_ONCE(current->rcu_read_lock_nesting)++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			spin_lock(&rnp->lock);
			if (rnp == t->rcu_blocked_node)
				break;
			spin_unlock(&rnp->lock);
		}
		empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk()
		 * drop rnp->lock and restore irq.
		 */
		if (!empty && rnp->qsmask == 0 &&
		    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
			struct rcu_node *rnp_p;

			if (rnp->parent == NULL) {
				/* Only one rcu_node in the tree. */
				cpu_quiet_msk_finish(&rcu_preempt_state, flags);
				return;
			}
			/* Report up the rest of the hierarchy. */
			mask = rnp->grpmask;
			spin_unlock_irqrestore(&rnp->lock, flags);
			rnp_p = rnp->parent;
			spin_lock_irqsave(&rnp_p->lock, flags);
			WARN_ON_ONCE(rnp->qsmask);
			cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags);
			return;
		}
		spin_unlock(&rnp->lock);
	}
	local_irq_restore(flags);
}
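
/*
 * To summarize the cleanup path above: the quiescent state is reported
 * upward only when this task was the last reader blocking the current
 * grace period on this rcu_node (the list was non-empty on entry and
 * is empty now) and no CPUs covered by this rcu_node still owe a
 * quiescent state (rnp->qsmask == 0).  With a single-node tree that
 * ends the grace period outright via cpu_quiet_msk_finish(); otherwise
 * cpu_quiet_msk() reports the result to the parent rcu_node.  In all
 * other cases the lock is simply dropped and the grace period waits.
 */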

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
	if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
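
/*
 * Typical read-side usage of the above (a minimal sketch; "gp", "p",
 * struct foo, and do_something_with() are hypothetical and not defined
 * in this file):
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->field);
 *	rcu_read_unlock();
 *
 * In this implementation the common case only increments and decrements
 * current->rcu_read_lock_nesting, so readers never spin or block on
 * updaters.
 */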

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	struct list_head *lp;
	int phase = rnp->gpnum & 0x1;
	struct task_struct *t;

	if (!list_empty(&rnp->blocked_tasks[phase])) {
		spin_lock_irqsave(&rnp->lock, flags);
		phase = rnp->gpnum & 0x1;  /* re-read under lock. */
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			printk(" P%d", t->pid);
		spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
	WARN_ON_ONCE(rnp->qsmask);
}
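
/*
 * The ordering requirement above matters because the low-order bit of
 * ->gpnum selects which blocked_tasks[] list is "current".  Advancing
 * ->gpnum repurposes the two lists, so a task still queued on the
 * outgoing current list would mean that a grace period completed while
 * a reader was still blocked, which is exactly the bug these
 * WARN_ON_ONCE() consistency checks are meant to catch.
 */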

/*
 * Check for preempted RCU readers for the specified rcu_node structure.
 * If the caller needs a reliable answer, it must hold the rcu_node's
 * ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
				      struct rcu_node *rnp,
				      struct rcu_data *rdp)
{
	int i;
	struct list_head *lp;
	struct list_head *lp_root;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *tp;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return;  /* Shouldn't happen: at least one CPU online. */
	}
	WARN_ON_ONCE(rnp != rdp->mynode &&
		     (!list_empty(&rnp->blocked_tasks[0]) ||
		      !list_empty(&rnp->blocked_tasks[1])));

	/*
	 * Move tasks up to root rcu_node.  Rely on the fact that the
	 * root rcu_node can be at most one ahead of the rest of the
	 * rcu_nodes in terms of gp_num value.  This fact allows us to
	 * move the blocked_tasks[] array directly, element by element.
	 */
	for (i = 0; i < 2; i++) {
		lp = &rnp->blocked_tasks[i];
		lp_root = &rnp_root->blocked_tasks[i];
		while (!list_empty(lp)) {
			tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
			spin_lock(&rnp_root->lock);  /* irqs already disabled */
			list_del(&tp->rcu_node_entry);
			tp->rcu_blocked_node = rnp_root;
			list_add(&tp->rcu_node_entry, lp_root);
			spin_unlock(&rnp_root->lock);  /* irqs remain disabled */
		}
	}
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
		rcu_preempt_qs(cpu);
		return;
	}
	if (per_cpu(rcu_preempt_data, cpu).qs_pending) {
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
	}
}
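
/*
 * The RCU_READ_UNLOCK_NEED_QS flag set above is consumed in
 * rcu_read_unlock_special(): when the reader finally leaves its
 * outermost critical section, it sees the flag, clears it, and calls
 * rcu_preempt_qs() on its own behalf, so the grace period need not
 * wait for a later scheduling-clock interrupt to notice the quiescent
 * state.
 */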

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
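
/*
 * Typical usage of call_rcu() (a minimal sketch; struct foo and
 * foo_reclaim() are hypothetical):
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	list_del_rcu(&p->list);
 *	call_rcu(&p->rcu, foo_reclaim);
 *
 * The callback is invoked only after every reader that might still
 * hold a reference to "p" has left its RCU read-side critical section.
 */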

/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}
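
/*
 * Setting ->rcu_read_lock_nesting to 1 before calling rcu_read_unlock()
 * above forces the outermost-unlock path: the counter drops to zero
 * and, if ->rcu_read_unlock_special is set, rcu_read_unlock_special()
 * runs and dequeues the exiting task from any blocked_tasks[] list it
 * is still on.
 */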

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static inline void rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptable RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
}

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections.
 */
static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
				      struct rcu_node *rnp,
				      struct rcu_data *rdp)
{
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
void rcu_preempt_process_callbacks(void)
{
}

/*
 * In classic RCU, call_rcu() is just call_rcu_sched().
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	call_rcu_sched(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */