#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

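/*
 * For reference (defined in asm/spinlock_types.h, not here): the ticket
 * lock is a head/tail pair overlaid with a single word, so one wide
 * atomic op can touch both halves at once. __ticket_t is u8 when NR_CPUS
 * is small enough and u16 otherwise - hence the 2^16 CPU limit above:
 *
 *	typedef struct arch_spinlock {
 *		union {
 *			__ticketpair_t head_tail;
 *			struct __raw_tickets {
 *				__ticket_t head, tail;
 *			} tickets;
 *		};
 *	} arch_spinlock_t;
 */
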
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PPRO_FENCE)
/*
 * On PPro SMP, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
}

#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
						   __ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
					__ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.head == lock.tickets.tail;
}

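/*
 * Note the by-value parameter above: this is meant for a snapshot of the
 * lock word (the lockref code, for one, tests a copied lock+count pair),
 * not for the live lock itself.
 */
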
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

	inc = xadd(&lock->tickets, inc);	/* draw a ticket, fetch old head/tail */
	if (likely(inc.head == inc.tail))
		goto out;			/* uncontended fast path */

	inc.tail &= ~TICKET_SLOWPATH_FLAG;
	for (;;) {
		unsigned count = SPIN_THRESHOLD;

		do {
			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
				goto out;
			cpu_relax();
		} while (--count);
		/* spun too long: let the paravirt backend block this vCPU */
		__ticket_lock_spinning(lock, inc.tail);
	}
out:	barrier();	/* make sure nothing creeps before the lock is taken */
}

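/*
 * Illustrative sketch (never compiled, not part of this header): a minimal
 * user-space model of the xadd scheme used by arch_spin_lock() above,
 * written with GCC __atomic builtins instead of the kernel's xadd(), and
 * with the paravirt slowpath left out. The demo_* names are invented, and
 * a little-endian layout (head in the low half) is assumed.
 */
#if 0
#include <stdint.h>

struct demo_ticket_lock {
	union {
		uint32_t head_tail;
		struct {
			uint16_t head;	/* low half: ticket now being served */
			uint16_t tail;	/* high half: next free ticket */
		} tickets;
	};
};

static void demo_lock(struct demo_ticket_lock *lock)
{
	/* one wide xadd: draw a ticket (tail++) and read the old head */
	uint32_t old = __atomic_fetch_add(&lock->head_tail, 1u << 16,
					  __ATOMIC_ACQUIRE);
	uint16_t me = (uint16_t)(old >> 16);

	/* spin until the head reaches the ticket we drew */
	while (__atomic_load_n(&lock->tickets.head, __ATOMIC_ACQUIRE) != me)
		;	/* the kernel inserts cpu_relax() here */
}

static void demo_unlock(struct demo_ticket_lock *lock)
{
	/* narrow 16-bit add: a head increment can never carry into the tail */
	__atomic_fetch_add(&lock->tickets.head, 1, __ATOMIC_RELEASE);
}
#endif
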
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
		return 0;

	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

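/*
 * Sketch only, continuing the hypothetical demo_* model from above: trylock
 * as one compare-and-swap over the whole head_tail word, mirroring the
 * cmpxchg in arch_spin_trylock() (again minus the slowpath flag).
 */
#if 0
static int demo_trylock(struct demo_ticket_lock *lock)
{
	uint32_t old, new;

	old = __atomic_load_n(&lock->head_tail, __ATOMIC_RELAXED);
	if ((uint16_t)old != (uint16_t)(old >> 16))
		return 0;	/* head != tail: locked or queued, give up */

	/* take the ticket we observed, unless the lock changed meanwhile */
	new = old + (1u << 16);
	return __atomic_compare_exchange_n(&lock->head_tail, &old, new,
					   0, __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}
#endif
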
static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
					    arch_spinlock_t old)
{
	arch_spinlock_t new;

	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

	/* Perform the unlock on the "before" copy */
	old.tickets.head += TICKET_LOCK_INC;

	/* Clear the slowpath flag */
	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);

	/*
	 * If the lock is uncontended, clear the flag - use cmpxchg in
	 * case it changes behind our back though.
	 */
	if (new.tickets.head != new.tickets.tail ||
	    cmpxchg(&lock->head_tail, old.head_tail,
		    new.head_tail) != old.head_tail) {
		/*
		 * Lock still has someone queued for it, so wake up an
		 * appropriate waiter.
		 */
		__ticket_unlock_kick(lock, old.tickets.head);
	}
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	if (TICKET_SLOWPATH_FLAG &&
	    static_key_false(&paravirt_ticketlocks_enabled)) {
		arch_spinlock_t prev;

		prev = *lock;
		add_smp(&lock->tickets.head, TICKET_LOCK_INC);

		/* add_smp() is a full mb() */

		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
			__ticket_unlock_slowpath(lock, prev);
	} else
		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}

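/*
 * The paravirt slowpath above in one paragraph (a summary, not new
 * mechanism): a virtual CPU that spins past SPIN_THRESHOLD calls
 * __ticket_lock_spinning(), and the hypervisor backend (KVM or Xen in
 * this era) sets TICKET_SLOWPATH_FLAG in the tail via
 * __ticket_enter_slowpath() before putting the vCPU to sleep. The
 * unlocker always bumps head with the full-barrier add_smp(); if it then
 * sees the flag, __ticket_unlock_slowpath() either clears it with cmpxchg
 * (lock now uncontended) or kicks the vCPU whose ticket just came up via
 * __ticket_unlock_kick().
 */
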
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	/* more than one ticket outstanding: a holder plus at least one waiter */
	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

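/*
 * Worked example of the bias arithmetic, for the NR_CPUS <= 2048 flavour
 * where this era's asm/rwlock.h sets RW_LOCK_BIAS = 0x00100000 and
 * WRITE_LOCK_CMP == RW_LOCK_BIAS:
 *
 *	unlocked:	lock == 0x00100000
 *	three readers:	lock == 0x000ffffd	(three decs, still positive)
 *	write-locked:	lock == 0x00000000	(whole bias subtracted)
 *	reader vs writer: the result goes negative/non-zero, so the jns/jz
 *			  below fall through to the __*_lock_failed slow paths
 *
 * And a hypothetical user-space model of the two trylock paths (demo_*
 * names invented; GCC __atomic builtins stand in for the atomic_* ops):
 */
#if 0
#include <stdint.h>

#define DEMO_RW_BIAS 0x00100000

struct demo_rwlock { int32_t lock; };	/* initialised to DEMO_RW_BIAS */

static int demo_read_trylock(struct demo_rwlock *rw)
{
	/* mirrors READ_LOCK_ATOMIC(dec_return): negative means a writer */
	if (__atomic_sub_fetch(&rw->lock, 1, __ATOMIC_ACQUIRE) >= 0)
		return 1;
	__atomic_add_fetch(&rw->lock, 1, __ATOMIC_RELAXED);	/* back out */
	return 0;
}

static int demo_write_trylock(struct demo_rwlock *rw)
{
	/* mirrors atomic_sub_and_test(): zero left means no readers/writers */
	if (__atomic_sub_fetch(&rw->lock, DEMO_RW_BIAS, __ATOMIC_ACQUIRE) == 0)
		return 1;
	__atomic_add_fetch(&rw->lock, DEMO_RW_BIAS, __ATOMIC_RELAXED);
	return 0;
}
#endif
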
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* These helper macros come from <asm/rwlock.h>; don't let them leak out */
#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */