/* kernel/spinlock.c */
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 */
12#include <linux/linkage.h>
13#include <linux/preempt.h>
14#include <linux/spinlock.h>
15#include <linux/interrupt.h>
16#include <linux/module.h>
17
/*
 * Generic declaration of the raw read_trylock() function,
 * architectures are supposed to optimize this:
 *
 * This fallback simply acquires the read lock outright (it may spin
 * inside __raw_read_lock()) and then reports success -- it can never
 * return 0.  An architecture wanting a true non-blocking trylock must
 * provide its own optimized version.
 */
int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
{
	__raw_read_lock(lock);
	return 1;
}
EXPORT_SYMBOL(generic__raw_read_trylock);
29int __lockfunc _spin_trylock(spinlock_t *lock)
30{
31 preempt_disable();
32 if (_raw_spin_trylock(lock))
33 return 1;
34
35 preempt_enable();
36 return 0;
37}
38EXPORT_SYMBOL(_spin_trylock);
39
40int __lockfunc _read_trylock(rwlock_t *lock)
41{
42 preempt_disable();
43 if (_raw_read_trylock(lock))
44 return 1;
45
46 preempt_enable();
47 return 0;
48}
49EXPORT_SYMBOL(_read_trylock);
50
51int __lockfunc _write_trylock(rwlock_t *lock)
52{
53 preempt_disable();
54 if (_raw_write_trylock(lock))
55 return 1;
56
57 preempt_enable();
58 return 0;
59}
60EXPORT_SYMBOL(_write_trylock);
61
#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)

/*
 * _read_lock - acquire @lock for reading.
 * Preemption is disabled first so we cannot be preempted while
 * spinning on (or holding) the raw lock; the matching enable is in
 * _read_unlock().
 */
void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
70
/*
 * _spin_lock_irqsave - take @lock with local interrupts disabled.
 * Returns the previous interrupt state, to be handed back to
 * _spin_unlock_irqrestore().
 *
 * Ordering matters: irqs off first, then preemption off, then the raw
 * lock.  The saved flags are passed down by reference --
 * NOTE(review): presumably so some architectures can briefly
 * re-enable interrupts from them while spinning; arch-specific,
 * confirm.
 */
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_spin_lock_flags(lock, &flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
81
/*
 * _spin_lock_irq - take @lock, unconditionally disabling local irqs.
 * Unlike _spin_lock_irqsave() the previous irq state is not saved, so
 * callers must pair this with _spin_unlock_irq() and know that irqs
 * were enabled on entry.
 */
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
89
/*
 * _spin_lock_bh - take @lock with softirq processing disabled.
 * bh off first, then preemption off, then the raw lock; undone in
 * reverse by _spin_unlock_bh().
 */
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
97
/*
 * _read_lock_irqsave - take @lock for reading with local irqs off.
 * Returns the previous interrupt state for _read_unlock_irqrestore().
 * Ordering: irqs off, then preemption off, then the raw lock.
 */
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
108
/*
 * _read_lock_irq - take @lock for reading, unconditionally disabling
 * local irqs (previous irq state is not saved; pair with
 * _read_unlock_irq()).
 */
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
116
/*
 * _read_lock_bh - take @lock for reading with softirqs disabled.
 * Undone in reverse order by _read_unlock_bh().
 */
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
124
/*
 * _write_lock_irqsave - take @lock for writing with local irqs off.
 * Returns the previous interrupt state for _write_unlock_irqrestore().
 * Ordering: irqs off, then preemption off, then the raw lock.
 */
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
135
/*
 * _write_lock_irq - take @lock for writing, unconditionally disabling
 * local irqs (previous irq state is not saved; pair with
 * _write_unlock_irq()).
 */
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
143
/*
 * _write_lock_bh - take @lock for writing with softirqs disabled.
 * Undone in reverse order by _write_unlock_bh().
 */
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
151
/*
 * _spin_lock - acquire @lock.
 * Preemption is disabled before spinning on the raw lock; the
 * matching enable is in _spin_unlock().
 */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock);
159
/*
 * _write_lock - acquire @lock for writing.
 * Preemption is disabled before spinning on the raw lock; the
 * matching enable is in _write_unlock().
 */
void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_write_lock(lock);
}

EXPORT_SYMBOL(_write_lock);

#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */

/*
 * BUILD_LOCK_OPS - generate the preemption-friendly lock functions for
 * one lock flavour ("op" = spin/read/write, "locktype" = the lock's
 * type name).
 *
 * Rather than spinning inside the raw lock with preemption disabled,
 * each generated function loops on a trylock: on failure it re-enables
 * preemption (and, in the irqsave variant, interrupts), sets
 * break_lock to signal the current holder that someone is waiting, and
 * busy-waits preemptably with cpu_relax() until the lock looks free.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;	/* got it: preemption stays off */	\
		preempt_enable();					\
									\
		/* Ask the holder to drop the lock ASAP ... */		\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		/* ... and poll, preemptably, until it looks free. */	\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
/* Same trylock-poll loop, with local irqs disabled across each	*/	\
/* trylock attempt; returns the saved irq flags on success.	*/	\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/*								\
	 * Careful: we must exclude softirqs too, hence the		\
	 * irq-disabling. We use the generic preemption-aware		\
	 * function:							\
	 */								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
255
256#endif /* CONFIG_PREEMPT */
257
/*
 * _spin_unlock - release @lock and re-enable preemption, undoing
 * _spin_lock() in reverse order.
 */
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);
264
/*
 * _write_unlock - release the write lock and re-enable preemption,
 * undoing _write_lock() in reverse order.
 */
void __lockfunc _write_unlock(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);
271
/*
 * _read_unlock - release the read lock and re-enable preemption,
 * undoing _read_lock() in reverse order.
 */
void __lockfunc _read_unlock(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);
278
/*
 * _spin_unlock_irqrestore - undo _spin_lock_irqsave(): release @lock,
 * restore the saved irq state @flags, then re-enable preemption.
 */
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
286
/*
 * _spin_unlock_irq - undo _spin_lock_irq(): release @lock,
 * unconditionally re-enable local irqs, then preemption.
 */
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);
294
/*
 * _spin_unlock_bh - undo _spin_lock_bh(): release @lock, drop the
 * preempt count, then re-enable softirqs.
 * NOTE(review): the _no_resched variant is presumably used so the
 * reschedule check happens only once, inside local_bh_enable() --
 * confirm against the preempt implementation.
 */
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_spin_unlock_bh);
302
/*
 * _read_unlock_irqrestore - undo _read_lock_irqsave(): release the
 * read lock, restore the saved irq state @flags, then re-enable
 * preemption.
 */
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
310
/*
 * _read_unlock_irq - undo _read_lock_irq(): release the read lock,
 * unconditionally re-enable local irqs, then preemption.
 */
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);
318
/*
 * _read_unlock_bh - undo _read_lock_bh(): release the read lock, drop
 * the preempt count without rescheduling, then re-enable softirqs
 * (local_bh_enable() runs last; see note at _spin_unlock_bh()-style
 * unlocks -- NOTE(review): confirm resched handling).
 */
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_read_unlock_bh);
326
/*
 * _write_unlock_irqrestore - undo _write_lock_irqsave(): release the
 * write lock, restore the saved irq state @flags, then re-enable
 * preemption.
 */
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
334
/*
 * _write_unlock_irq - undo _write_lock_irq(): release the write lock,
 * unconditionally re-enable local irqs, then preemption.
 */
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);
342
/*
 * _write_unlock_bh - undo _write_lock_bh(): release the write lock,
 * drop the preempt count without rescheduling, then re-enable
 * softirqs (NOTE(review): presumably so local_bh_enable() performs
 * the single resched check -- confirm).
 */
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_write_unlock_bh);
350
/*
 * _spin_trylock_bh - try to take @lock with softirqs disabled.
 * Returns 1 on success, leaving bh and preemption disabled exactly as
 * _spin_lock_bh() would; on failure returns 0 after unwinding both in
 * reverse order (the _no_resched variant defers any resched check to
 * local_bh_enable()).
 */
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;

	preempt_enable_no_resched();
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);
363
/*
 * in_lock_functions - report whether @addr falls inside the
 * __lockfunc text section.  Returns non-zero if so, 0 otherwise.
 */
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];
	unsigned long start = (unsigned long)__lock_text_start;
	unsigned long end = (unsigned long)__lock_text_end;

	return addr >= start && addr < end;
}
EXPORT_SYMBOL(in_lock_functions);