/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of the
 * MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair and of having each cpu spin
 * on a local variable while waiting to acquire the lock. It avoids the
 * expensive cache-line bouncing that common test-and-set spin-lock
 * implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

/*
 * The lock word points at the tail node of the queue of waiting cpus,
 * or is NULL when the lock is free.
 */
struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
};

/*
 * Note: the smp_load_acquire()/smp_store_release() pair is not sufficient
 * to form a full memory barrier across cpus on many architectures (except
 * x86) for mcs_unlock and mcs_lock. For applications that need a full
 * barrier across multiple cpus with an mcs_unlock()/mcs_lock() pair,
 * smp_mb__after_unlock_lock() should be used after mcs_lock.
 */
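
/*
 * Illustrative sketch, not part of the original file: how a caller that
 * needs the mcs_unlock()/mcs_lock() pair to behave as a full barrier
 * would use smp_mb__after_unlock_lock(). This fragment is never compiled
 * and the lock/node variables are hypothetical.
 */
#if 0
	mcs_spin_lock(&lock, &node);
	smp_mb__after_unlock_lock();	/* upgrade unlock+lock to a full barrier */
	/* accesses here are now fully ordered against the prior critical section */
#endif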

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to the node to this function in addition to the lock.
 * If the lock has already been acquired, then this will spin on
 * node->locked until the previous lock holder sets node->locked
 * in mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next = NULL;

	/* xchg() is a full barrier; atomically make this node the new tail. */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * Threads only spin on their own node->locked value for lock
		 * acquisition. However, since this thread can immediately
		 * acquire the lock and does not proceed to spin on its own
		 * node->locked, that value won't be used. If a debug mode is
		 * needed to audit lock status, then set node->locked here.
		 */
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	/*
	 * Wait until the lock holder passes the lock down.
	 * Using smp_load_acquire() provides a memory barrier that
	 * ensures subsequent operations happen after the lock is acquired.
	 */
	while (!(smp_load_acquire(&node->locked)))
		arch_mutex_cpu_relax();
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg(lock, node, NULL) == node))
			return;
		/*
		 * The cmpxchg() failed: a new waiter has already swapped
		 * itself onto the lock in mcs_spin_lock() but has not yet
		 * linked itself in; wait until it sets our next pointer.
		 */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	/*
	 * Pass lock to next waiter.
	 * smp_store_release() provides a memory barrier to ensure
	 * all operations in the critical section have been completed
	 * before unlocking.
	 */
	smp_store_release(&next->locked, 1);
}
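
/*
 * Illustrative sketch, not part of the original file: typical usage of the
 * API above. Each acquisition supplies its own node, usually on the
 * caller's stack. "demo_lock" and the function name are made-up, and the
 * block is compiled out unless MCS_SPINLOCK_DEMO is defined.
 */
#ifdef MCS_SPINLOCK_DEMO
static struct mcs_spinlock *demo_lock;		/* NULL means unlocked */

static inline void mcs_demo_critical_section(void)
{
	struct mcs_spinlock node;		/* local node; we spin only on this */

	mcs_spin_lock(&demo_lock, &node);
	/* ... critical section protected by the MCS lock ... */
	mcs_spin_unlock(&demo_lock, &node);
}
#endif /* MCS_SPINLOCK_DEMO */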

#endif /* __LINUX_MCS_SPINLOCK_H */