locking/qspinlock: Use _acquire/_release() versions of cmpxchg() & xchg()
kernel/locking/qspinlock.c
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description of this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however, to
 * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
 * existing API, we must modify it somewhat.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion of its own context and there is a limit to
 * the contexts that can nest: task, softirq, hardirq and nmi. As there are
 * at most 4 nesting levels, the nesting level can be encoded in 2 bits. We
 * can then encode the tail by combining this 2-bit nesting level with the
 * cpu number. With one byte for the lock value and 3 bytes for the tail,
 * only a 32-bit word is needed. Even though we only need 1 bit for the
 * lock, we extend it to a full byte to achieve better performance on
 * architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 * atomic operations on 8-bit and 16-bit data types.
 */
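
/*
 * Illustrative layout of the 32-bit lock word (added note, assuming the
 * field offsets defined in asm-generic/qspinlock_types.h with
 * _Q_PENDING_BITS == 8, i.e. fewer than 16K possible CPUs):
 *
 *	bits  0- 7: locked byte
 *	bits  8-15: pending byte
 *	bits 16-17: tail index (per-CPU node nesting level)
 *	bits 18-31: tail cpu (+1, so that 0 means "no tail")
 */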

#include "mcs_spinlock.h"

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define MAX_NODES	8
#else
#define MAX_NODES	4
#endif

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
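
/*
 * Sizing note (added for illustration): struct mcs_spinlock in
 * mcs_spinlock.h is a next pointer plus two ints, i.e. 16 bytes on a
 * 64-bit architecture, so the 4 nodes above fill exactly one 64-byte
 * cacheline. With PV spinlocks, the 8 slots span two cachelines and the
 * second one holds the additional PV state, as noted above.
 */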

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
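
/*
 * Worked example (illustrative only): with the typical field offsets
 * (_Q_TAIL_IDX_OFFSET == 16, _Q_TAIL_CPU_OFFSET == 18), a waiter on cpu 2
 * using per-CPU node index 1 gets
 *
 *	encode_tail(2, 1) == ((2 + 1) << 18) | (1 << 16) == 0x000d0000
 *
 * and decode_tail(0x000d0000) maps back to per_cpu_ptr(&mcs_nodes[1], 2).
 */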

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

/*
 * By using the whole 2nd least significant byte for the pending bit, we
 * can allow better optimization of the lock acquisition for the pending
 * bit holder.
 *
 * This internal structure is also used by the set_locked function which
 * is not restricted to _Q_PENDING_BITS == 8.
 */
struct __qspinlock {
	union {
		atomic_t val;
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;
			u8	pending;
		};
		struct {
			u16	locked_pending;
			u16	tail;
		};
#else
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
};
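
/*
 * For illustration (not part of the original file), on a little-endian
 * machine the union above overlays the 32-bit atomic_t as:
 *
 *	bytes 0-1: locked, pending	(also addressable as the u16 locked_pending)
 *	bytes 2-3: tail			(u16)
 *
 * This is what lets clear_pending_set_locked() below clear the pending byte
 * and set the locked byte with one 16-bit store, and xchg_tail() swap the
 * tail with one 16-bit xchg, neither touching the other half of the word.
 */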

#if _Q_PENDING_BITS == 8
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	struct __qspinlock *l = (void *)lock;

	/*
	 * Use release semantics to make sure that the MCS node is properly
	 * initialized before changing the tail code.
	 */
	return (u32)xchg_release(&l->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
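
/*
 * Note (illustrative, assuming _Q_TAIL_OFFSET == 16): the tail code lives in
 * the upper halfword of the lock word, so shifting it down by _Q_TAIL_OFFSET
 * yields a 16-bit value that can be exchanged directly through l->tail, and
 * the previous tail code is reconstructed by shifting the result back up.
 * The release ordering keeps the node->locked / node->next initialization
 * done by the caller from being reordered past the point where the new tail
 * (and thus the node) becomes visible to other queuers.
 */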

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * Use release semantics to make sure that the MCS node is
		 * properly initialized before changing the tail code.
		 */
		old = atomic_cmpxchg_release(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}


/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
 * all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_head(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head		__pv_wait_head

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 new, old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto queue;

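	/*
	 * (Added note, not in the original: virt_spin_lock() is an arch hook;
	 * e.g. on x86 it may fall back to a simple test-and-set lock when
	 * running on a hypervisor, because strict queueing behaves badly when
	 * lock waiters can be preempted by the host. On bare metal it returns
	 * false and we continue with the queued slow path below.)
	 */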
	if (virt_spin_lock(lock))
		return;

	/*
	 * wait for in-progress pending->locked hand-overs
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
			cpu_relax();
	}

	/*
	 * trylock || pending
	 *
	 * 0,0,0 -> 0,0,1 ; trylock
	 * 0,0,1 -> 0,1,1 ; pending
	 */
	for (;;) {
		/*
		 * If we observe any contention; queue.
		 */
		if (val & ~_Q_LOCKED_MASK)
			goto queue;

		new = _Q_LOCKED_VAL;
		if (val == new)
			new |= _Q_PENDING_VAL;

		/*
		 * Acquire semantics are required here as the function may
		 * return immediately if the lock was free.
		 */
		old = atomic_cmpxchg_acquire(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	/*
	 * we won the trylock
	 */
	if (new == _Q_LOCKED_VAL)
		return;

	/*
	 * we're pending, wait for the owner to go away.
	 *
	 * *,1,1 -> *,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because not all clear_pending_set_locked()
	 * implementations imply full barriers.
	 */
	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
		cpu_relax();

	/*
	 * take ownership and clear the pending bit.
	 *
	 * *,1,0 -> *,0,1
	 */
	clear_pending_set_locked(lock);
	return;

	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;
	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 */
	old = xchg_tail(lock, tail);

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);
		WRITE_ONCE(prev->next, node);

		pv_wait_node(node);
		arch_mcs_spin_lock_contended(&node->locked);
	}
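
	/*
	 * (Added note, not in the original: arch_mcs_spin_lock_contended()
	 * from mcs_spinlock.h spins on our own node->locked until the
	 * previous queue head hands the MCS "lock" over via
	 * arch_mcs_spin_unlock_contended(&next->locked) further down, so
	 * each waiter spins on a cacheline it owns rather than on the global
	 * lock word.)
	 */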

	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 */
	pv_wait_head(lock, node);
	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
		cpu_relax();

	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,0,0 -> *,0,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail),
	 * clear the tail code and grab the lock. Otherwise, we only need to
	 * grab the lock.
	 */
	for (;;) {
		if (val != tail) {
			set_locked(lock);
			break;
		}
		/*
		 * The smp_load_acquire() call above has provided the
		 * necessary acquire semantics required for locking. At most
		 * two iterations of this loop may be run: a failed cmpxchg
		 * means a new waiter has appended itself to the queue, so
		 * the second iteration takes the set_locked() path above.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
		if (old == val)
			goto release;	/* No contention */

		val = old;
	}

	/*
	 * contended path; wait for next, release.
	 */
	while (!(next = READ_ONCE(node->next)))
		cpu_relax();

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
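
/*
 * For context (a sketch, not part of this file): the slow path above is only
 * entered from the inline fast path in include/asm-generic/qspinlock.h,
 * which looks roughly like this at the time of this change:
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		u32 val;
 *
 *		// Uncontended case: 0,0,0 -> 0,0,1 with acquire ordering.
 *		val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
 *		if (likely(val == 0))
 *			return;
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 *
 * so @val handed to the slow path is whatever non-zero word the failed
 * cmpxchg observed.
 */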

/*
 * Generate the paravirt code for queued_spin_unlock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head

#undef queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif
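
/*
 * (Added explanatory note: when CONFIG_PARAVIRT_SPINLOCKS is enabled, the
 * block above re-includes this very file with _GEN_PV_LOCK_SLOWPATH
 * defined. On that second pass the section guarded by #ifndef
 * _GEN_PV_LOCK_SLOWPATH is skipped, the pv_* hooks point at the real
 * paravirt implementations from qspinlock_paravirt.h instead of the empty
 * __pv_* stubs, and the slow path is compiled a second time under the name
 * __pv_queued_spin_lock_slowpath.)
 */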