#include <linux/percpu.h>
#include <linux/sched.h>
#include "mcs_spinlock.h"

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single MCS node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
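
/*
 * Each per-CPU node carries @next/@prev links, a @locked hand-off flag and
 * its owner's @cpu number; the latter feeds the tail encoding described
 * below.
 */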

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
        return cpu_nr + 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
        int cpu_nr = encoded_cpu_val - 1;

        return per_cpu_ptr(&osq_node, cpu_nr);
}
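
/*
 * For example, CPU 0 is published as encode_cpu(0) == 1 and decode_cpu(1)
 * hands back per_cpu_ptr(&osq_node, 0); this leaves the value 0 free to act
 * as OSQ_UNLOCKED_VAL, i.e. "nobody queued".
 */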

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
              struct optimistic_spin_node *node,
              struct optimistic_spin_node *prev)
{
        struct optimistic_spin_node *next = NULL;
        int curr = encode_cpu(smp_processor_id());
        int old;

        /*
         * If there is a prev node in queue, then the 'old' value will be
         * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
         * we're currently last in queue, then the queue will become empty.
         */
        old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

        for (;;) {
                if (atomic_read(&lock->tail) == curr &&
                    atomic_cmpxchg(&lock->tail, curr, old) == curr) {
                        /*
                         * We were the last queued, we moved @lock back. @prev
                         * will now observe @lock and will complete its
                         * unlock()/unqueue().
                         */
                        break;
                }

                /*
                 * We must xchg() the @node->next value, because if we were to
                 * leave it in, a concurrent unlock()/unqueue() from
                 * @node->next might complete Step-A and think its @prev is
                 * still valid.
                 *
                 * If the concurrent unlock()/unqueue() wins the race, we'll
                 * wait for either @lock to point to us, through its Step-B, or
                 * wait for a new @node->next from its Step-C.
                 */
                if (node->next) {
                        next = xchg(&node->next, NULL);
                        if (next)
                                break;
                }

                cpu_relax_lowlatency();
        }

        return next;
}
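
/*
 * A NULL return therefore means the tail was still ours and we moved it
 * back to @prev (or to OSQ_UNLOCKED_VAL); any other outcome hands the
 * caller a stable @next to unlink from or to pass the OSQ to.
 */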

bool osq_lock(struct optimistic_spin_queue *lock)
{
        struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
        struct optimistic_spin_node *prev, *next;
        int curr = encode_cpu(smp_processor_id());
        int old;

        node->locked = 0;
        node->next = NULL;
        node->cpu = curr;

        old = atomic_xchg(&lock->tail, curr);
        if (old == OSQ_UNLOCKED_VAL)
                return true;
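
        /*
         * Someone was queued before us: @old encodes that CPU. Link in
         * behind it and wait for it to pass us the OSQ.
         */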
        prev = decode_cpu(old);
        node->prev = prev;
        ACCESS_ONCE(prev->next) = node;

        /*
         * Normally @prev is untouchable after the above store, because at that
         * moment unlock can proceed and wipe the node element from stack.
         *
         * However, since our nodes are static per-cpu storage, we're
         * guaranteed their existence -- this allows us to apply
         * cmpxchg in an attempt to undo our queueing.
         */

        while (!smp_load_acquire(&node->locked)) {
                /*
                 * If we need to reschedule, bail... so we can block.
                 */
                if (need_resched())
                        goto unqueue;

                cpu_relax_lowlatency();
        }
        return true;
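
/*
 * Slow exit: we were asked to reschedule while still queued. The three steps
 * below (A, B, C) undo the queueing: detach from @prev, obtain a stable
 * @next, then splice @prev and @next together (or move @lock back if we were
 * last).
 */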
unqueue:
        /*
         * Step - A -- stabilize @prev
         *
         * Undo our @prev->next assignment; this will make @prev's
         * unlock()/unqueue() wait for a next pointer since @lock points to us
         * (or later).
         */

        for (;;) {
                if (prev->next == node &&
                    cmpxchg(&prev->next, node, NULL) == node)
                        break;

                /*
                 * We can only fail the cmpxchg() racing against an unlock(),
                 * in which case we should observe @node->locked becoming
                 * true.
                 */
                if (smp_load_acquire(&node->locked))
                        return true;

                cpu_relax_lowlatency();

                /*
                 * Or we race against a concurrent unqueue()'s step-B, in which
                 * case its step-C will write us a new @node->prev pointer.
                 */
                prev = ACCESS_ONCE(node->prev);
        }

        /*
         * Step - B -- stabilize @next
         *
         * Similar to unlock(), wait for @node->next or move @lock from @node
         * back to @prev.
         */

        next = osq_wait_next(lock, node, prev);
        if (!next)
                return false;

        /*
         * Step - C -- unlink
         *
         * @prev is stable because it's still waiting for a new @prev->next
         * pointer, @next is stable because our @node->next pointer is NULL and
         * it will wait in Step-A.
         */

        ACCESS_ONCE(next->prev) = prev;
        ACCESS_ONCE(prev->next) = next;

        return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
        struct optimistic_spin_node *node, *next;
        int curr = encode_cpu(smp_processor_id());

        /*
         * Fast path for the uncontended case.
         */
        if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
                return;
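
        /*
         * The cmpxchg failed, so lock->tail no longer points at us: somebody
         * queued behind us and must be handed the OSQ.
         */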
        /*
         * Second most likely case.
         */
        node = this_cpu_ptr(&osq_node);
        next = xchg(&node->next, NULL);
        if (next) {
                ACCESS_ONCE(next->locked) = 1;
                return;
        }
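
        /*
         * Our @next pointer was NULL, yet the tail is not ours, so the
         * successor is still busy queueing or unqueueing itself; use
         * osq_wait_next() to obtain a stable pointer to it.
         */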
        next = osq_wait_next(lock, node, NULL);
        if (next)
                ACCESS_ONCE(next->locked) = 1;
}