include/asm-generic/qspinlock.h
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * queued_spin_lock_slowpath() can ACQUIRE the lock before
	 * issuing the unordered store that sets _Q_LOCKED_VAL.
	 *
	 * See both smp_cond_acquire() sites for more detail.
	 *
	 * This however means that in code like:
	 *
	 *   spin_lock(A)		spin_lock(B)
	 *   spin_unlock_wait(B)	spin_is_locked(A)
	 *   do_something()		do_something()
	 *
	 * Both CPUs can end up running do_something() because the store
	 * setting _Q_LOCKED_VAL will pass through the loads in
	 * spin_unlock_wait() and/or spin_is_locked().
	 *
	 * Avoid this by issuing a full memory barrier between the spin_lock()
	 * and the loads in spin_unlock_wait() and spin_is_locked().
	 *
	 * Note that regular mutual exclusion doesn't care about this
	 * delayed store.
	 */
	smp_mb();
	return atomic_read(&lock->val) & _Q_LOCKED_MASK;
}
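
/*
 * Illustrative sketch (editor's example, not part of this header): the
 * usual consumer of this helper is an assertion such as
 * assert_spin_locked(), reached through the arch_spin_is_locked()
 * mapping at the bottom of this file. The names below are hypothetical.
 */
#if 0
static void my_update_stats(struct my_dev *dev)
{
	/* Caller is expected to hold dev->lock. */
	WARN_ON_ONCE(!queued_spin_is_locked(&dev->lock));
	dev->stats.updates++;
}
#endif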

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to avoid lock stealing by the lockref
 * code and changing things underneath the lock. This also allows some
 * optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
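
/*
 * Illustrative sketch (editor's example, not part of this header): a
 * lockref-style user snapshots the lock by value and only attempts its
 * optimistic, cmpxchg-based fast path while the snapshot shows the lock
 * completely free (no holder and no queued waiters). The structure and
 * function names below are hypothetical; real code would read and update
 * the {lock, count} pair atomically.
 */
#if 0
struct my_lockref {
	struct qspinlock	lock;
	int			count;
};

static bool my_lockref_get_fast(struct my_lockref *ref)
{
	struct my_lockref old = *ref;	/* real code: one atomic 64-bit read */

	if (!queued_spin_value_unlocked(old.lock))
		return false;		/* fall back to taking the lock */
	/* ... cmpxchg the whole {lock, count} pair with count + 1 ... */
	return true;
}
#endif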

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
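
/*
 * Illustrative sketch (editor's example, not part of this header):
 * contention checks are typically used to decide whether to drop a held
 * lock early, in the style of spin_needbreak()/cond_resched_lock(). The
 * names below are hypothetical.
 */
#if 0
static void my_process_items(struct my_ctx *ctx)
{
	queued_spin_lock(&ctx->lock);
	while (my_more_work(ctx)) {
		my_do_one_item(ctx);
		/* Let waiters queued behind us get a turn. */
		if (queued_spin_is_contended(&ctx->lock)) {
			queued_spin_unlock(&ctx->lock);
			cpu_relax();
			queued_spin_lock(&ctx->lock);
		}
	}
	queued_spin_unlock(&ctx->lock);
}
#endif
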
/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}
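
/*
 * Illustrative sketch (editor's example, not part of this header): a
 * caller that must not spin can probe the lock with trylock and defer
 * its work on failure. The names below are hypothetical.
 */
#if 0
static bool my_try_flush(struct my_ctx *ctx)
{
	if (!queued_spin_trylock(&ctx->lock))
		return false;	/* contended: let a later caller flush */
	my_flush_locked(ctx);
	queued_spin_unlock(&ctx->lock);
	return true;
}
#endif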

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}
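
/*
 * Illustrative sketch (editor's example, not part of this header): the
 * uncontended case is a single acquire-cmpxchg from 0 to _Q_LOCKED_VAL;
 * any other observed value is handed to the MCS-queueing slow path. A
 * plain critical section therefore looks like the hypothetical helper
 * below (real users go through the spin_lock()/arch_spin_lock() layer).
 */
#if 0
static void my_add_item(struct my_ctx *ctx, struct my_item *item)
{
	queued_spin_lock(&ctx->lock);	/* one cmpxchg when the lock is free */
	list_add_tail(&item->node, &ctx->items);
	queued_spin_unlock(&ctx->lock);
}
#endif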

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * smp_mb__before_atomic() in order to guarantee release semantics
	 */
	smp_mb__before_atomic();
	atomic_sub(_Q_LOCKED_VAL, &lock->val);
}
#endif
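
/*
 * Illustrative sketch (editor's example, not part of this header): the
 * #ifndef above lets an architecture supply a cheaper unlock from its
 * own asm/qspinlock.h (defined before this header is included). One
 * plausible override, assuming the locked byte is the low byte of @val
 * (little-endian layout), is a plain release store of that byte:
 */
#if 0
#define queued_spin_unlock queued_spin_unlock
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/* Clearing the locked byte releases the lock. */
	smp_store_release((u8 *)&lock->val, 0);
}
#endif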

/**
 * queued_spin_unlock_wait - wait until current lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
	/* See queued_spin_is_locked() */
	smp_mb();
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();
}
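
/*
 * Illustrative sketch (editor's example, not part of this header): a
 * typical use is letting the current critical section drain before an
 * object changes state, without actually taking the lock. The names
 * below are hypothetical.
 */
#if 0
static void my_ctx_quiesce(struct my_ctx *ctx)
{
	/* Stop new lockers first... */
	WRITE_ONCE(ctx->dead, true);
	/* ...then wait out anyone still inside the lock. */
	queued_spin_unlock_wait(&ctx->lock);
}
#endif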

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
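
/*
 * Illustrative sketch (editor's example, not part of this header): when
 * running as a guest without paravirtualized spinlock support, an
 * architecture can override this hook (again, before including this
 * header) to fall back to a simple test-and-set lock, since a vCPU
 * holding an MCS queue slot may be preempted for a long time. The
 * guest-detection predicate below is hypothetical.
 */
#if 0
#define virt_spin_lock virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!my_running_as_guest())
		return false;	/* bare metal: use the queued slow path */

	/* Plain test-and-set acquisition instead of queueing. */
	do {
		while (atomic_read(&lock->val))
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#endif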

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)
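
/*
 * Illustrative sketch (editor's example, not part of this header): an
 * architecture opts in by selecting ARCH_USE_QUEUED_SPINLOCKS in its
 * Kconfig and pulling this file in from its own spinlock headers,
 * roughly as below (paths are hypothetical).
 */
#if 0
/* arch/myarch/include/asm/spinlock.h */
#include <asm/qspinlock.h>	/* arch overrides, then asm-generic/qspinlock.h */
#endif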

#endif /* __ASM_GENERIC_QSPINLOCK_H */