#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/bootmem.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */

#define _Q_SLOW_VAL     (3U << _Q_LOCKED_OFFSET)

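/*
 * With the lock word layout assumed by this file, the locked byte of a PV
 * lock therefore holds one of three values:
 *
 *   0              - the lock is free
 *   _Q_LOCKED_VAL  - the lock is held; the unlock fastpath suffices
 *   _Q_SLOW_VAL    - the lock is held and hashed; the unlocker must take
 *                    __pv_queued_spin_unlock_slowpath() to unhash the lock
 *                    and pv_kick() the waiting vCPU
 */
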
/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown for non-overcommitted guests with this
 * aggressive wait-early mechanism.
 *
 * The status of the previous node will be checked at a fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK      0xff

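/*
 * With a mask of 0xff, pv_wait_early() only samples the previous node's
 * state once every 256 iterations of the spin loop (whenever
 * (loop & PV_PREV_CHECK_MASK) == 0), so the adaptive check adds at most
 * one extra cacheline read per 256 spins.
 */
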
/*
 * Queue node uses: vcpu_running & vcpu_halted.
 * Queue head uses: vcpu_running & vcpu_hashed.
 */
enum vcpu_state {
        vcpu_running = 0,
        vcpu_halted,            /* Used only in pv_wait_node */
        vcpu_hashed,            /* = pv_hash'ed + vcpu_halted */
};

struct pv_node {
        struct mcs_spinlock     mcs;
        struct mcs_spinlock     __res[3];

        int                     cpu;
        u8                      state;
};

/*
 * By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
 * being queued. By allowing one lock stealing attempt here when the pending
 * bit is off, it helps to reduce the performance impact of lock waiter
 * preemption without the drawback of lock starvation.
 */
#define queued_spin_trylock(l) pv_queued_spin_steal_lock(l)
static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;

        return !(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
                (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0);
}

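/*
 * The steal is only attempted when neither the locked nor the pending
 * bits are set: the plain atomic_read() filters out obviously held locks
 * cheaply, and the cmpxchg() on the locked byte then claims the lock
 * atomically against other would-be owners. A waiter that loses the
 * cmpxchg() race simply falls through to the normal queueing path.
 */
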
/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;

        WRITE_ONCE(l->pending, 1);
}

static __always_inline void clear_pending(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;

        WRITE_ONCE(l->pending, 0);
}

/*
 * The pending bit check in pv_queued_spin_steal_lock() isn't a memory
 * barrier. Therefore, an atomic cmpxchg() is used to acquire the lock
 * to be sure that the acquisition actually takes effect.
 */
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;

        return !READ_ONCE(l->locked) &&
               (cmpxchg(&l->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
                        == _Q_PENDING_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
        atomic_set_mask(_Q_PENDING_VAL, &lock->val);
}

static __always_inline void clear_pending(struct qspinlock *lock)
{
        atomic_clear_mask(_Q_PENDING_VAL, &lock->val);
}

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
        int val = atomic_read(&lock->val);

        for (;;) {
                int old, new;

                if (val & _Q_LOCKED_MASK)
                        break;

                /*
                 * Try to clear pending bit & set locked bit
                 */
                old = val;
                new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
                val = atomic_cmpxchg(&lock->val, old, new);

                if (val == old)
                        return 1;
        }
        return 0;
}
#endif /* _Q_PENDING_BITS == 8 */

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"

/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed) the
 * max load factor is 0.75, which is around the point where open addressing
 * breaks down.
 */
struct pv_hash_entry {
        struct qspinlock *lock;
        struct pv_node   *node;
};

#define PV_HE_PER_LINE  (SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN       (PAGE_SIZE / sizeof(struct pv_hash_entry))

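/*
 * Worked example, assuming 64-bit pointers, 64-byte cache lines and 4k
 * pages: each pv_hash_entry is 16 bytes, so PV_HE_PER_LINE is 4 and
 * PV_HE_MIN is 256. A system with 64 possible CPUs asks for 4 * 64 = 256
 * entries, i.e. exactly one page of hash buckets; smaller systems are
 * rounded up to PV_HE_MIN.
 */
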
static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
        int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

        if (pv_hash_size < PV_HE_MIN)
                pv_hash_size = PV_HE_MIN;

        /*
         * Allocate space from bootmem which should be page-size aligned
         * and hence cacheline aligned.
         */
        pv_lock_hash = alloc_large_system_hash("PV qspinlock",
                                               sizeof(struct pv_hash_entry),
                                               pv_hash_size, 0, HASH_EARLY,
                                               &pv_lock_hash_bits, NULL,
                                               pv_hash_size, pv_hash_size);
}

#define for_each_hash_entry(he, offset, hash)                                           \
        for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;       \
             offset < (1 << pv_lock_hash_bits);                                         \
             offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])

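/*
 * The probe sequence starts at the first entry of the bucket's home
 * cacheline (the low bits of the hash are cleared with PV_HE_PER_LINE - 1)
 * and then walks the table linearly, wrapping around modulo the table
 * size, so a lookup touches as few cachelines as possible.
 */
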
static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
        unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
        struct pv_hash_entry *he;
        int hopcnt = 0;

        for_each_hash_entry(he, offset, hash) {
                hopcnt++;
                if (!cmpxchg(&he->lock, NULL, lock)) {
                        WRITE_ONCE(he->node, node);
                        qstat_hop(hopcnt);
                        return &he->lock;
                }
        }
        /*
         * Hard assume there is a free entry for us.
         *
         * This is guaranteed by ensuring every blocked lock only ever consumes
         * a single entry, and since we only have 4 nesting levels per CPU
         * and allocated 4*nr_possible_cpus(), this must be so.
         *
         * The single entry is guaranteed by having the lock owner unhash
         * before it releases.
         */
        BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
        unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
        struct pv_hash_entry *he;
        struct pv_node *node;

        for_each_hash_entry(he, offset, hash) {
                if (READ_ONCE(he->lock) == lock) {
                        node = READ_ONCE(he->node);
                        WRITE_ONCE(he->lock, NULL);
                        return node;
                }
        }
        /*
         * Hard assume we'll find an entry.
         *
         * This guarantees a limited lookup time and is itself guaranteed by
         * having the lock owner do the unhash -- IFF the unlock sees the
         * SLOW flag, there MUST be a hash entry.
         */
        BUG();
}

/*
 * Return true if it is time to check the previous node and that node is
 * not in a running state.
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
        if ((loop & PV_PREV_CHECK_MASK) != 0)
                return false;

        return READ_ONCE(prev->state) != vcpu_running;
}

/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;

        BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));

        pn->cpu = smp_processor_id();
        pn->state = vcpu_running;
}

/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in the hash table on
 * its behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct pv_node *pp = (struct pv_node *)prev;
        int waitcnt = 0;
        int loop;
        bool wait_early;

        /* waitcnt processing will be compiled out if !QUEUED_LOCK_STAT */
        for (;; waitcnt++) {
                for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
                        if (READ_ONCE(node->locked))
                                return;
                        if (pv_wait_early(pp, loop)) {
                                wait_early = true;
                                break;
                        }
                        cpu_relax();
                }

                /*
                 * Order pn->state vs pn->locked thusly:
                 *
                 * [S] pn->state = vcpu_halted    [S] next->locked = 1
                 *     MB                             MB
                 * [L] pn->locked                 [RmW] pn->state = vcpu_hashed
                 *
                 * Matches the cmpxchg() from pv_kick_node().
                 */
                smp_store_mb(pn->state, vcpu_halted);

                if (!READ_ONCE(node->locked)) {
                        qstat_inc(qstat_pv_wait_node, true);
                        qstat_inc(qstat_pv_wait_again, waitcnt);
                        qstat_inc(qstat_pv_wait_early, wait_early);
                        pv_wait(&pn->state, vcpu_halted);
                }

                /*
                 * If pv_kick_node() changed us to vcpu_hashed, retain that
                 * value so that pv_wait_head_or_lock() knows to not also try
                 * to hash this lock.
                 */
                cmpxchg(&pn->state, vcpu_halted, vcpu_running);

                /*
                 * If the locked flag is still not set after wakeup, it is a
                 * spurious wakeup and the vCPU should wait again. However,
                 * there is a pretty high overhead for CPU halting and kicking.
                 * So it is better to spin for a while in the hope that the
                 * MCS lock will be released soon.
                 */
                qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));
        }

        /*
         * By now our node->locked should be 1 and our caller will not actually
         * spin-wait for it. We do however rely on our caller to do a
         * load-acquire for us.
         */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node(), advance their state
 * so that they're waiting in pv_wait_head_or_lock(); this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct __qspinlock *l = (void *)lock;

        /*
         * If the vCPU is indeed halted, advance its state to match that of
         * pv_wait_node(). If OTOH this fails, the vCPU was running and will
         * observe its next->locked value and advance itself.
         *
         * Matches with smp_store_mb() and cmpxchg() in pv_wait_node().
         */
        if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted)
                return;

        /*
         * Put the lock into the hash table and set the _Q_SLOW_VAL.
         *
         * As this is the same vCPU that will check the _Q_SLOW_VAL value and
         * the hash table later on at unlock time, no atomic instruction is
         * needed.
         */
        WRITE_ONCE(l->locked, _Q_SLOW_VAL);
        (void)pv_hash(lock, pn);
}

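/*
 * vCPU state handoff in a nutshell: a queued waiter goes from vcpu_running
 * to vcpu_halted in pv_wait_node() before calling pv_wait(). When the lock
 * holder hands the MCS lock over, pv_kick_node() either finds the waiter
 * halted and moves it to vcpu_hashed (hashing the lock and setting
 * _Q_SLOW_VAL on its behalf), or finds it still running and leaves it
 * alone. pv_wait_head_or_lock() then uses the vcpu_hashed state to skip
 * the hashing step it would otherwise do itself.
 */
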
/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct __qspinlock *l = (void *)lock;
        struct qspinlock **lp = NULL;
        int waitcnt = 0;
        int loop;

        /*
         * If pv_kick_node() already advanced our state, we don't need to
         * insert ourselves into the hash table anymore.
         */
        if (READ_ONCE(pn->state) == vcpu_hashed)
                lp = (struct qspinlock **)1;

        for (;; waitcnt++) {
                /*
                 * Set correct vCPU state to be used by queue node wait-early
                 * mechanism.
                 */
                WRITE_ONCE(pn->state, vcpu_running);

                /*
                 * Set the pending bit in the active lock spinning loop to
                 * disable lock stealing before attempting to acquire the lock.
                 */
                set_pending(lock);
                for (loop = SPIN_THRESHOLD; loop; loop--) {
                        if (trylock_clear_pending(lock))
                                goto gotlock;
                        cpu_relax();
                }
                clear_pending(lock);

                if (!lp) { /* ONCE */
                        lp = pv_hash(lock, pn);

                        /*
                         * We must hash before setting _Q_SLOW_VAL, such that
                         * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
                         * we'll be sure to be able to observe our hash entry.
                         *
                         *   [S] <hash>                   [Rmw] l->locked == _Q_SLOW_VAL
                         *       MB                             RMB
                         * [RmW] l->locked = _Q_SLOW_VAL    [L] <unhash>
                         *
                         * Matches the smp_rmb() in __pv_queued_spin_unlock().
                         */
                        if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
                                /*
                                 * The lock was free and now we own the lock.
                                 * Change the lock value back to _Q_LOCKED_VAL
                                 * and remove the lock from the hash table.
                                 */
                                WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
                                WRITE_ONCE(*lp, NULL);
                                goto gotlock;
                        }
                }
                WRITE_ONCE(pn->state, vcpu_halted);
                qstat_inc(qstat_pv_wait_head, true);
                qstat_inc(qstat_pv_wait_again, waitcnt);
                pv_wait(&l->locked, _Q_SLOW_VAL);

                /*
                 * The unlocker should have freed the lock before kicking the
                 * CPU. So if the lock is still not free, it is a spurious
                 * wakeup or another vCPU has stolen the lock. The current
                 * vCPU should spin again.
                 */
                qstat_inc(qstat_pv_spurious_wakeup, READ_ONCE(l->locked));
        }

        /*
         * The cmpxchg() or xchg() call before coming here provides the
         * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
         * here is to indicate to the compiler that the value will always
         * be nonzero to enable better code optimization.
         */
gotlock:
        return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
        struct __qspinlock *l = (void *)lock;
        struct pv_node *node;

        if (unlikely(locked != _Q_SLOW_VAL)) {
                WARN(!debug_locks_silent,
                     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
                     (unsigned long)lock, atomic_read(&lock->val));
                return;
        }

        /*
         * A failed cmpxchg doesn't provide any memory-ordering guarantees,
         * so we need a barrier to order the read of the node data in
         * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
         *
         * Matches the xchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
         */
        smp_rmb();

        /*
         * Since the above failed to release, this must be the SLOW path.
         * Therefore start by looking up the blocked node and unhashing it.
         */
        node = pv_unhash(lock);

        /*
         * Now that we have a reference to the (likely) blocked pv_node,
         * release the lock.
         */
        smp_store_release(&l->locked, 0);

        /*
         * At this point the memory pointed at by lock can be freed/reused,
         * however we can still use the pv_node to kick the CPU.
         * The other vCPU may not really be halted, but kicking an active
         * vCPU is harmless other than the additional latency in completing
         * the unlock.
         */
        qstat_inc(qstat_pv_kick_unlock, true);
        pv_kick(node->cpu);
}

/*
 * Include the architecture specific callee-save thunk of the
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
 * function close to each other, sharing consecutive instruction cachelines.
 * Alternatively, an architecture-specific version of __pv_queued_spin_unlock()
 * can be defined.
 */
#include <asm/qspinlock_paravirt.h>

#ifndef __pv_queued_spin_unlock
__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;
        u8 locked;

        /*
         * We must not unlock if SLOW, because in that case we must first
         * unhash. Otherwise it would be possible to have multiple @lock
         * entries, which would be BAD.
         */
        locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
        if (likely(locked == _Q_LOCKED_VAL))
                return;

        __pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */
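
/*
 * For reference, a minimal sketch of the two hooks an architecture is
 * expected to provide; the bodies and the exact wiring are illustrative
 * only and not taken from any particular port:
 *
 *      void pv_wait(u8 *ptr, u8 val)
 *      {
 *              // Ask the hypervisor to deschedule this vCPU, but only
 *              // while *ptr still equals val; return on any kick or
 *              // spurious wakeup.
 *      }
 *
 *      void pv_kick(int cpu)
 *      {
 *              // Ask the hypervisor to wake/reschedule the target vCPU.
 *      }
 *
 * On x86, for example, such hooks are installed by the guest's paravirt
 * setup code when it detects that it is running under a hypervisor.
 */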