/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * include/linux/spinlock_api_smp.h
 */
#else
#define raw_read_can_lock(l)	read_can_lock(l)
#define raw_write_can_lock(l)	write_can_lock(l)

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here there is only one user per
 * function, which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
			arch_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
			arch_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)		\
{									\
	_raw_##op##_lock_irqsave(lock);					\
}									\
									\
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
{									\
	unsigned long flags;						\
									\
	/*								\
	 * Careful: we must exclude softirqs too, hence the		\
	 * irq-disabling. We use the generic preemption-aware		\
	 * function:							\
	 */								\
	flags = _raw_##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
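
/*
 * For illustration only, not compiled: BUILD_LOCK_OPS(spin, raw_spinlock)
 * above expands __raw_spin_lock() to roughly the following. The point is
 * that the lock is only ever taken via a trylock under preempt_disable(),
 * and all of the contention spinning happens with preemption re-enabled:
 *
 *	void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(do_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			if (!(lock)->break_lock)
 *				(lock)->break_lock = 1;
 *			while (!raw_spin_can_lock(lock) && (lock)->break_lock)
 *				arch_spin_relax(&lock->raw_lock);
 *		}
 *		(lock)->break_lock = 0;
 *	}
 */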

#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif
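
/*
 * Caller-side sketch for the trylock entry points above (my_lock is a
 * hypothetical lock, not defined in this file): spin_trylock() returns
 * nonzero on success and never spins, so the caller must be prepared
 * for failure:
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	} else {
 *		... lock was contended, fall back ...
 *	}
 */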

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif
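
/*
 * Caller-side sketch for the irqsave entry points (my_lock is
 * hypothetical): these are normally reached through the
 * spin_lock_irqsave()/spin_unlock_irqrestore() wrappers, and the saved
 * flags must be passed back unchanged so that the caller's original
 * interrupt state is restored exactly:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, safe against local interrupts ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */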

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
	__raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	__raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif

#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
	return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
	__raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
	return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
	__raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
	__raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
	__raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
	__raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
	__raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
	return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
	__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
	return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
	__raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
	__raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
	__raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
	__raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
	__raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
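
/*
 * Caller-side sketch for the rwlock entry points above (my_rwlock is
 * hypothetical): any number of readers may hold the lock concurrently,
 * while a writer needs exclusive access:
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *	unsigned long flags;
 *
 *	read_lock(&my_rwlock);
 *	... readers only inspect the shared data ...
 *	read_unlock(&my_rwlock);
 *
 *	write_lock_irqsave(&my_rwlock, flags);
 *	... exclusive writer section ...
 *	write_unlock_irqrestore(&my_rwlock, flags);
 */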

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);
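
/*
 * Caller-side sketch (p1 and p2 are hypothetical objects whose ->lock
 * members share one lock class): when two locks of the same class must
 * be held at once, the second acquisition is taken via
 * spin_lock_nested() with a subclass, so lockdep does not report it as
 * a recursive self-deadlock:
 *
 *	spin_lock(&p1->lock);
 *	spin_lock_nested(&p2->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&p2->lock);
 *	spin_unlock(&p1->lock);
 */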

void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh_nested);

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
						   int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
				do_raw_spin_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
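
/*
 * Caller-side sketch for the nest_lock variant ('parent' and 'child'
 * are hypothetical): when an arbitrary number of same-class locks are
 * only ever taken while some outer lock is held, annotating each
 * acquisition against the outer lock tells lockdep that the outer lock
 * serializes them:
 *
 *	mutex_lock(&parent->mux);
 *	list_for_each_entry(child, &parent->children, node)
 *		spin_lock_nest_lock(&child->lock, &parent->mux);
 */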

#endif

notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
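
/*
 * Usage sketch (hypothetical caller): profiling code can use this to
 * recognise samples that landed inside a lock-acquisition function and
 * attribute them to the caller instead, e.g. an architecture's
 * profile_pc() might do something along the lines of:
 *
 *	unsigned long profile_pc(struct pt_regs *regs)
 *	{
 *		if (in_lock_functions(instruction_pointer(regs)))
 *			return caller_address_of(regs);	(hypothetical helper)
 *		return instruction_pointer(regs);
 *	}
 */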