arch/s390/include/asm/spinlock.h
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

/* Maximum number of retries in the out-of-line lock wait/trylock paths. */
extern int spin_retry;

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
		      unsigned int old, unsigned int new)
{
	asm volatile(
		"	cs	%0,%3,%1"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return old;
}
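
/*
 * Illustrative sketch (not the real implementation): the COMPARE AND
 * SWAP instruction above atomically performs the equivalent of
 *
 *	if (*lock == old)
 *		*lock = new;
 *	else
 *		old = *lock;
 *	return old;
 *
 * so callers detect success by checking whether the returned value
 * equals the "old" value they passed in.
 */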

/*
 * Simple spin lock operations. There are two variants, one that
 * clears IRQs on the local processor and one that does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) \
		 arch_spin_relax(lock); } while (0)

extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner_cpu == 0;
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	int old;

	/*
	 * The lock word holds the bitwise NOT of the owning CPU number,
	 * which is non-zero even for CPU 0, so zero always means unlocked.
	 */
	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return;
	arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	int old;

	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
	if (likely(old == 0))
		return 1;
	return arch_spin_trylock_retry(lp);
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	/*
	 * Swap the owner value with zero; using CS rather than a plain
	 * store also provides the memory ordering needed on release.
	 */
	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
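
/*
 * Usage sketch (illustrative only; kernel code normally goes through
 * the generic spin_lock()/spin_unlock() wrappers rather than calling
 * the arch_ primitives directly):
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	... critical section ...
 *	arch_spin_unlock(&lock);
 */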

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
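
/*
 * Lock word layout, as implied by the operations below: bit 31
 * (0x80000000) is set by a writer, while the lower 31 bits count the
 * active readers. Zero therefore means the lock is completely free,
 * and any non-negative value means no writer holds it.
 */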

/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	/*
	 * Masking out the write bit makes the CS fail whenever a writer
	 * holds the lock, sending us to the slow path.
	 */
	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;

	old = rw->lock & 0x7fffffffU;
	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
		_raw_read_lock_wait_flags(rw, flags);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old, cmp;

	/* Retry the decrement until no other CPU changed the lock word. */
	old = rw->lock;
	do {
		cmp = old;
		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
	} while (cmp != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
		_raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = rw->lock & 0x7fffffffU;
	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
		return 1;
	return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
		return 1;
	return _raw_write_trylock_retry(rw);
}
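
/*
 * Usage sketch (illustrative only; kernel code normally uses the
 * generic read_lock()/write_lock() wrappers rather than calling the
 * arch_ primitives directly):
 *
 *	arch_rwlock_t rw = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	arch_read_lock(&rw);	/- many readers may hold this at once
 *	... read-side critical section ...
 *	arch_read_unlock(&rw);
 *
 *	arch_write_lock(&rw);	/- excludes readers and other writers
 *	... write-side critical section ...
 *	arch_write_unlock(&rw);
 */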

#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */