arch/s390/lib/spinlock.c
/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = 1000;

/**
 * spin_retry= kernel command line parameter: the number of lock-value
 * polls a waiter performs inline before it considers yielding the CPU.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
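
/*
 * Example (a sketch, not taken from this file): booting with
 * "spin_retry=2000" on the kernel command line raises the busy-wait
 * budget used by the retry loops below from the default 1000
 * iterations to 2000 before a yield is considered.
 */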

void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
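
/*
 * Note on the lock value (an assumption based on the asm/spinlock.h
 * conventions of this tree, not spelled out here): SPINLOCK_LOCKVAL
 * is the bitwise complement of the owning CPU number, so a non-zero
 * lp->lock identifies the owner and ~owner recovers the CPU address
 * that smp_vcpu_scheduled() and smp_yield_cpu() expect.
 */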

void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	local_irq_restore(flags);
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
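
/*
 * Design note for the _flags variant above: interrupts stay enabled
 * (restored from the caller's 'flags') while busy-waiting and are
 * disabled only around the compare-and-swap, so heavy contention does
 * not extend the caller's irq-off window.  The rwlock _flags waiters
 * below follow the same pattern.
 */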

void arch_spin_relax(arch_spinlock_t *lp)
{
	unsigned int cpu = lp->lock;

	if (cpu != 0) {
		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
		    !smp_vcpu_scheduled(~cpu))
			smp_yield_cpu(~cpu);
	}
}
EXPORT_SYMBOL(arch_spin_relax);
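
/*
 * Presumably (an assumption about the callers, which live in
 * asm/spinlock.h and the generic lock-breaking code, not in this
 * file): arch_spin_relax() is the hook invoked while a caller spins
 * on a contended lock, handing the physical CPU to the lock holder
 * via directed yield when running virtualized or when the owning
 * vCPU is not scheduled.
 */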

int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int count;

	for (count = spin_retry; count > 0; count--)
		if (arch_spin_trylock_once(lp))
			return 1;
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

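/*
 * rwlock value encoding, as the code below uses it: bit 31
 * (0x80000000) set means write-locked, otherwise the low bits count
 * the readers.  Hence "(int) old < 0" tests for a writer and
 * "old + 1" registers one more reader.
 */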
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
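
/*
 * Unlike the spinlock waiters above, the rwlock waiters use the
 * undirected smp_yield(): a read-held rwlock has no single owner CPU
 * to which a directed yield could be targeted.
 */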

void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
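
/*
 * For context, a sketch of how asm/spinlock.h is expected to wire up
 * these helpers (an assumption, not part of this file): the fast path
 * is inlined and only contention falls through to the out-of-line
 * code, roughly:
 *
 *	static inline void arch_spin_lock(arch_spinlock_t *lp)
 *	{
 *		if (!arch_spin_trylock_once(lp))
 *			arch_spin_lock_wait(lp);
 *	}
 */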