#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}
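
/*
 * For example: on CPU 3, encode_cpu(3) returns 4, which is the value
 * stored in @lock->tail below, and decode_cpu(4) maps back to
 * per_cpu_ptr(&osq_node, 3). The +1 shift keeps 0 free for
 * OSQ_UNLOCKED_VAL, i.e. "no CPU queued".
 */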

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      struct optimistic_spin_node *prev)
{
	struct optimistic_spin_node *next = NULL;
	int curr = encode_cpu(smp_processor_id());
	int old;

	/*
	 * If there is a prev node in queue, then the 'old' value will be
	 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since,
	 * if we're currently last in queue, the queue will then become empty.
	 */
	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		cpu_relax_lowlatency();
	}

	return next;
}

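/*
 * Note the two callers below: the unqueue path in osq_lock() passes the
 * @prev it stabilized in Step-A, while osq_unlock() passes prev == NULL,
 * so @lock->tail falls back to OSQ_UNLOCKED_VAL if we were the last queued.
 */
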
bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;

	/*
	 * ACQUIRE semantics, pairs with corresponding RELEASE
	 * in unlock() uncontended, or fastpath.
	 */
	old = atomic_xchg_acquire(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;
	WRITE_ONCE(prev->next, node);

	/*
	 * Normally @prev is untouchable after the above store, because at
	 * that moment unlock can proceed and wipe the node element from the
	 * stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	while (!READ_ONCE(node->locked)) {
		/*
		 * If we need to reschedule, bail... so we can block.
		 */
		if (need_resched())
			goto unqueue;

		cpu_relax_lowlatency();
	}
	return true;

unqueue:
	/*
	 * Step - A -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

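	/*
	 * Queue layout at this point:
	 *
	 *	prev -> node -> (next ... tail)
	 *
	 * with @lock->tail encoding the CPU of the last queued node.
	 * Steps A-C below unlink @node: Step-A clears @prev->next,
	 * Step-B obtains a stable @next (or moves @lock->tail back to
	 * @prev if we were last), and Step-C splices @prev and @next
	 * together.
	 */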
	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax_lowlatency();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = READ_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

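	/*
	 * @node is now fully unlinked; returning false tells the caller
	 * that optimistic spinning failed, so it can go block.
	 */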
	return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
					  OSQ_UNLOCKED_VAL) == curr))
		return;

	/*
	 * Second most likely case.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	next = osq_wait_next(lock, node, NULL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}
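
/*
 * Usage sketch (illustrative only, not part of this file): a sleeping
 * lock's optimistic-spin path would bracket its spin loop with
 * osq_lock()/osq_unlock() roughly as below. The my_lock type and the
 * my_lock_*() helpers are hypothetical; the real callers are the mutex
 * and rwsem slow paths.
 */
#if 0
static bool my_lock_optimistic_spin(struct my_lock *l)
{
	bool acquired = false;

	preempt_disable();			/* per-CPU nodes require this */

	/* One spinner at a time per lock; bails if we must reschedule. */
	if (!osq_lock(&l->osq))
		goto out;

	while (my_lock_owner_running(l)) {	/* spin while owner is on-CPU */
		if (my_lock_try_acquire(l)) {
			acquired = true;
			break;
		}
		cpu_relax_lowlatency();
	}

	osq_unlock(&l->osq);
out:
	preempt_enable();
	return acquired;
}
#endif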