Commit | Line | Data |
---|---|---|
fb1c8f93 IM |
1 | #ifndef __LINUX_SPINLOCK_API_SMP_H |
2 | #define __LINUX_SPINLOCK_API_SMP_H | |
3 | ||
4 | #ifndef __LINUX_SPINLOCK_H | |
5 | # error "please don't include this file directly" | |
6 | #endif | |
7 | ||
8 | /* | |
9 | * include/linux/spinlock_api_smp.h | |
10 | * | |
11 | * spinlock API declarations on SMP (and debug) | |
12 | * (implemented in kernel/spinlock.c) | |
13 | * | |
14 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | |
15 | * Released under the General Public License (GPL). | |
16 | */ | |
17 | ||
/*
 * NOTE(review): per the file comment these are implemented in
 * kernel/spinlock.c; presumably in_lock_functions() tests whether
 * @addr falls within that lock-implementation text -- confirm there.
 */
int in_lock_functions(unsigned long addr);

/* BUG if the raw spinlock @x is not currently held. */
#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))

/*
 * Out-of-line raw-spinlock entry points.  The __acquires()/__releases()
 * markers are sparse annotations for static lock-context checking; they
 * generate no code.
 */
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)		__acquires(lock);
/* _nested/_nest_lock: lockdep subclass / outer-map variants. */
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
								__acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)		__acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
								__acquires(lock);

/* irqsave variants return the saved interrupt state as flags. */
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
								__acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
/* Trylocks return nonzero on success, zero if the lock was contended. */
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)		__releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
								__releases(lock);
fb1c8f93 | 45 | |
/*
 * When the corresponding CONFIG_INLINE_SPIN_* option is set, map the
 * out-of-line _raw_* entry point straight onto the inline __raw_*
 * implementation defined below, so callers get the inlined body.
 */
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

/*
 * Note the inverted sense here: plain unlock is inlined by default and
 * only forced out of line when CONFIG_UNINLINE_SPIN_UNLOCK is set.
 */
#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif
85 | ||
/*
 * Try to take @lock without spinning.  Returns 1 with preemption
 * disabled if the lock was acquired, 0 with preemption unchanged
 * otherwise.
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	/*
	 * Disable preemption first, so that on success we already hold
	 * the lock with preemption off.
	 */
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		/* trylock=1: tell lockdep this was a try-acquire. */
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	/* Lock not taken: undo the preempt_disable() and report failure. */
	preempt_enable();
	return 0;
}
96 | ||
69d0ee73 HC |
97 | /* |
98 | * If lockdep is enabled then we use the non-preemption spin-ops | |
99 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | |
100 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): | |
101 | */ | |
102 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | |
103 | ||
/*
 * Acquire @lock with local interrupts disabled, returning the previous
 * interrupt state for a later __raw_spin_unlock_irqrestore().
 * Order matters: irqs off first, then preemption off, then the lockdep
 * acquire annotation, then the actual lock.
 */
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * do_raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
	do_raw_spin_lock_flags(lock, &flags);
#endif
	/* Saved irq state; caller must pass it back at unlock time. */
	return flags;
}
123 | ||
/*
 * Acquire @lock with local interrupts unconditionally disabled
 * (no saved flags -- the matching unlock re-enables irqs outright).
 */
static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
	/* irqs off, then preemption off, then lockdep, then the lock. */
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
131 | ||
/*
 * Acquire @lock with bottom halves (softirqs) disabled on this CPU.
 */
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
	/*
	 * SOFTIRQ_LOCK_OFFSET: softirq-disable count bump used for
	 * BH-disabling lock primitives (also implies preempt protection).
	 */
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
138 | ||
/*
 * Plain acquire of @lock: disables preemption only (interrupts and
 * bottom halves remain enabled).
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	/* Annotate for lockdep before actually contending for the lock. */
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
145 | ||
ae58403f | 146 | #endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ |
69d0ee73 | 147 | |
/*
 * Release @lock and re-enable preemption -- the exact mirror, in
 * reverse order, of __raw_spin_lock().
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	/* lockdep release annotation must precede the real unlock. */
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable();
}
154 | ||
/*
 * Release @lock and restore the interrupt state saved by
 * __raw_spin_lock_irqsave() (@flags), then re-enable preemption.
 */
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	/* Restore irqs before preemption: reverse of the acquire order. */
	local_irq_restore(flags);
	preempt_enable();
}
163 | ||
/*
 * Release @lock and unconditionally re-enable local interrupts;
 * pairs with __raw_spin_lock_irq().
 */
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
171 | ||
/*
 * Release @lock and re-enable bottom halves; pairs with
 * __raw_spin_lock_bh().
 */
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	/* Drop the softirq-disable count taken at lock time. */
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
178 | ||
/*
 * Try to take @lock with bottom halves disabled.  Returns 1 with BHs
 * off if the lock was acquired; 0 with BH state restored otherwise.
 */
static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	/* Disable BHs first so success leaves us fully protected. */
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	if (do_raw_spin_trylock(lock)) {
		/* trylock=1: try-acquire annotation for lockdep. */
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	/* Failed: undo the BH-disable and report failure. */
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	return 0;
}
189 | ||
6b6b4792 TG |
190 | #include <linux/rwlock_api_smp.h> |
191 | ||
fb1c8f93 | 192 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ |