Commit | Line | Data |
---|---|---|
e260be67 PM |
1 | /* |
2 | * Read-Copy Update mechanism for mutual exclusion (RT implementation) | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
17 | * | |
18 | * Copyright (C) IBM Corporation, 2006 | |
19 | * | |
20 | * Author: Paul McKenney <paulmck@us.ibm.com> | |
21 | * | |
22 | * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com> | |
23 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | |
24 | * Papers: | |
25 | * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf | |
26 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) | |
27 | * | |
28 | * For detailed explanation of Read-Copy Update mechanism see - | |
29 | * Documentation/RCU | |
30 | * | |
31 | */ | |
32 | ||
33 | #ifndef __LINUX_RCUPREEMPT_H | |
34 | #define __LINUX_RCUPREEMPT_H | |
35 | ||
e260be67 PM |
36 | #include <linux/cache.h> |
37 | #include <linux/spinlock.h> | |
38 | #include <linux/threads.h> | |
ac44021f | 39 | #include <linux/smp.h> |
e260be67 PM |
40 | #include <linux/cpumask.h> |
41 | #include <linux/seqlock.h> | |
42 | ||
a979241c IM |
43 | extern void rcu_qsctr_inc(int cpu); |
/*
 * No-op: the preemptible implementation does not track bottom-half
 * quiescent states separately — the bh flavor maps onto plain RCU
 * here (see the call_rcu_bh alias below).
 */
static inline void rcu_bh_qsctr_inc(int cpu)
{
}
dd0078f4 SR |
45 | |
46 | /* | |
47 | * Someone might want to pass call_rcu_bh as a function pointer. | |
48 | * So this needs to just be a rename and not a macro function. | |
49 | * (no parentheses) | |
50 | */ | |
a979241c | 51 | #define call_rcu_bh call_rcu |
e260be67 | 52 | |
4446a36f PM |
53 | /** |
54 | * call_rcu_sched - Queue RCU callback for invocation after sched grace period. | |
55 | * @head: structure to be used for queueing the RCU updates. | |
56 | * @func: actual update function to be invoked after the grace period | |
57 | * | |
58 | * The update function will be invoked some time after a full | |
59 | * synchronize_sched()-style grace period elapses, in other words after | |
60 | * all currently executing preempt-disabled sections of code (including | |
61 | * hardirq handlers, NMI handlers, and local_irq_save() blocks) have | |
62 | * completed. | |
63 | */ | |
64 | extern void call_rcu_sched(struct rcu_head *head, | |
65 | void (*func)(struct rcu_head *head)); | |
66 | ||
b55ab616 PM |
67 | extern void __rcu_read_lock(void) __acquires(RCU); |
68 | extern void __rcu_read_unlock(void) __releases(RCU); | |
e260be67 PM |
69 | extern int rcu_pending(int cpu); |
70 | extern int rcu_needs_cpu(int cpu); | |
71 | ||
/*
 * Underpinnings of rcu_read_lock_bh()/rcu_read_unlock_bh() for the
 * preemptible implementation: an ordinary RCU read-side critical
 * section with bottom halves disabled across it.
 *
 * Wrapped in do { ... } while (0) so each macro expands to a single
 * statement: the previous bare { ... } form broke when used as the
 * body of an unbraced if/else ("if (c) __rcu_read_lock_bh(); else ...").
 */
#define __rcu_read_lock_bh()	do { rcu_read_lock(); local_bh_disable(); } while (0)
#define __rcu_read_unlock_bh()	do { local_bh_enable(); rcu_read_unlock(); } while (0)
74 | ||
75 | extern void __synchronize_sched(void); | |
76 | ||
77 | extern void __rcu_init(void); | |
4446a36f | 78 | extern void rcu_init_sched(void); |
e260be67 PM |
79 | extern void rcu_check_callbacks(int cpu, int user); |
80 | extern void rcu_restart_cpu(int cpu); | |
81 | extern long rcu_batches_completed(void); | |
82 | ||
/*
 * Return the number of RCU batches processed thus far, for debug and
 * statistics.  The _bh variant is identical to straight RCU: in this
 * implementation the bh flavor is just an alias for plain RCU.
 */
static inline long rcu_batches_completed_bh(void)
{
	return rcu_batches_completed();
}
91 | ||
92 | #ifdef CONFIG_RCU_TRACE | |
93 | struct rcupreempt_trace; | |
94 | extern long *rcupreempt_flipctr(int cpu); | |
95 | extern long rcupreempt_data_completed(void); | |
96 | extern int rcupreempt_flip_flag(int cpu); | |
97 | extern int rcupreempt_mb_flag(int cpu); | |
98 | extern char *rcupreempt_try_flip_state_name(void); | |
99 | extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); | |
100 | #endif | |
101 | ||
102 | struct softirq_action; | |
103 | ||
2232c2d8 | 104 | #ifdef CONFIG_NO_HZ |
a979241c IM |
105 | extern void rcu_enter_nohz(void); |
106 | extern void rcu_exit_nohz(void); | |
107 | #else | |
108 | # define rcu_enter_nohz() do { } while (0) | |
109 | # define rcu_exit_nohz() do { } while (0) | |
110 | #endif | |
2232c2d8 | 111 | |
a6826048 PM |
112 | /* |
113 | * A context switch is a grace period for rcupreempt synchronize_rcu() | |
114 | * only during early boot, before the scheduler has been initialized. | |
115 | * So, how the heck do we get a context switch? Well, if the caller | |
116 | * invokes synchronize_rcu(), they are willing to accept a context | |
117 | * switch, so we simply pretend that one happened. | |
118 | * | |
119 | * After boot, there might be a blocked or preempted task in an RCU | |
120 | * read-side critical section, so we cannot then take the fastpath. | |
121 | */ | |
122 | static inline int rcu_blocking_is_gp(void) | |
123 | { | |
124 | return num_online_cpus() == 1 && !rcu_scheduler_active; | |
125 | } | |
126 | ||
e260be67 | 127 | #endif /* __LINUX_RCUPREEMPT_H */ |