#ifndef _URCU_H
#define _URCU_H

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

/* x86 32/64 specific */
#define mb()  asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")

/* x86 32 */
static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
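
/*
 * Minimal usage sketch (illustrative only, not part of this header):
 * placing two ACCESS_ONCE() invocations in separate C statements keeps
 * the loads distinct and ordered by the compiler. "flag" and "data" are
 * hypothetical variables used purely for illustration.
 *
 *	int f = ACCESS_ONCE(flag);
 *	int d = ACCESS_ONCE(data);
 *
 * As separate volatile accesses, the compiler may not merge, refetch,
 * or reorder these two loads with respect to each other; the CPU,
 * however, still may.
 */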

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p) ({ \
	typeof(p) _________p1 = ACCESS_ONCE(p); \
	smp_read_barrier_depends(); \
	(_________p1); \
})
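
/*
 * Usage sketch (illustrative only): "struct node" and "global_node" are
 * hypothetical names, not part of this header. The dereference must be
 * performed from within a read-side critical section (see
 * rcu_read_lock() below).
 *
 *	struct node *p = rcu_dereference(global_node);
 *	if (p)
 *		do_something(p->value);
 */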

#define SIGURCU SIGUSR1

/* Global quiescent period parity */
extern int urcu_qparity;

extern int __thread urcu_active_readers[2];

static inline int get_urcu_qparity(void)
{
	return urcu_qparity;
}

/*
 * Returns the current quiescent period parity, which must be passed to
 * the matching rcu_read_unlock().
 */
static inline int rcu_read_lock(void)
{
	int urcu_parity = get_urcu_qparity();
	urcu_active_readers[urcu_parity]++;
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	barrier();
	return urcu_parity;
}

static inline void rcu_read_unlock(int urcu_parity)
{
	barrier();
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers[urcu_parity]--;
}
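
/*
 * Read-side usage sketch (illustrative only): the parity returned by
 * rcu_read_lock() must be passed back to the matching rcu_read_unlock().
 * "struct node", "global_node" and "consume" are hypothetical names.
 *
 *	struct node *p;
 *	int parity;
 *
 *	parity = rcu_read_lock();
 *	p = rcu_dereference(global_node);
 *	if (p)
 *		consume(p->value);
 *	rcu_read_unlock(parity);
 */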

extern void rcu_write_lock(void);
extern void rcu_write_unlock(void);

extern void *urcu_publish_content(void **ptr, void *new);

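/*
 * Update-side usage sketch (illustrative only, assuming that
 * urcu_publish_content() returns the old pointer only once no reader
 * can still hold a reference to it, so it is safe to reclaim).
 * "struct node" and "global_node" are hypothetical names.
 *
 *	struct node *new = malloc(sizeof(*new));
 *	struct node *old;
 *
 *	new->value = 42;
 *	old = urcu_publish_content((void **)&global_node, new);
 *	free(old);
 */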

/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
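
/*
 * Thread lifecycle sketch (illustrative only): each reader thread
 * registers before its first read-side critical section and
 * unregisters before it exits. "reader_fn" is a hypothetical pthread
 * start routine.
 *
 *	void *reader_fn(void *arg)
 *	{
 *		urcu_register_thread();
 *		... read-side critical sections ...
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */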

#endif /* _URCU_H */