#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	atomic_set(&brw->write_ctr, 0);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}
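
/*
 * Minimal init/free usage sketch. Everything below this comment is
 * illustrative only: my_brw, my_key and my_subsys_init() are hypothetical
 * names, not part of this file. It shows the intended pairing: check the
 * -ENOMEM return of __percpu_init_rwsem(), and call percpu_free_rwsem()
 * only once no readers or writers can still use the semaphore.
 */
static struct percpu_rw_semaphore my_brw;
static struct lock_class_key my_key;

static int __maybe_unused my_subsys_init(void)
{
	int err = __percpu_init_rwsem(&my_brw, "my_brw", &my_key);

	if (err)
		return err;	/* -ENOMEM: alloc_percpu() failed */

	/* ... my_brw is now usable by readers and writers ... */

	percpu_free_rwsem(&my_brw);	/* teardown, no users left */
	return 0;
}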

/*
 * This is the fast-path for down_read/up_read; it only needs to ensure
 * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
 * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
 * serialize with the preempt-disabled section below.
 *
 * The nontrivial part is that we should guarantee acquire/release semantics
 * in case when
 *
 *	R_W: down_write() comes after up_read(), the writer should see all
 *	     changes done by the reader
 * or
 *	W_R: down_read() comes after up_write(), the reader should see all
 *	     changes done by the writer
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in this case we have the necessary barriers.
 *
 * But if it succeeds we do not have any barriers; atomic_read(write_ctr) or
 * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
 * reader inside the critical section. See the comments in down_write and
 * up_write below.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success = false;

	preempt_disable();
	if (likely(!atomic_read(&brw->write_ctr))) {
		__this_cpu_add(*brw->fast_read_ctr, val);
		success = true;
	}
	preempt_enable();

	return success;
}

/*
 * Like the normal down_read() this is not recursive; the writer can
 * come after the first percpu_down_read() and create a deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	if (likely(update_fast_ctr(brw, +1))) {
		rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
		return;
	}

	down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	/* avoid up_read()->rwsem_release() */
	__up_read(&brw->rw_sem);
}

int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
{
	if (unlikely(!update_fast_ctr(brw, +1))) {
		if (!__down_read_trylock(&brw->rw_sem))
			return 0;
		atomic_inc(&brw->slow_read_ctr);
		__up_read(&brw->rw_sem);
	}

	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
	return 1;
}
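
/*
 * Trylock usage sketch (hypothetical caller, not part of this file).
 * A zero return means a writer is active or pending and no read-side
 * state was taken, so the caller must not call percpu_up_read().
 */
static int __maybe_unused my_try_read(struct percpu_rw_semaphore *brw)
{
	if (!percpu_down_read_trylock(brw))
		return -EBUSY;

	/* ... read-side critical section ... */

	percpu_up_read(brw);
	return 0;
}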

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* false-positive is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
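
/*
 * Reader-side usage sketch (hypothetical caller, not part of this file).
 * In the common writer-free case both calls below take the fast path and
 * only inc/dec this CPU's fast_read_ctr under preempt_disable().
 */
static void __maybe_unused my_read_section(struct percpu_rw_semaphore *brw)
{
	percpu_down_read(brw);	/* may sleep if a writer is pending */

	/* ... loads/stores serialized against percpu_down_write() ... */

	percpu_up_read(brw);
}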

static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}

/*
 * A writer increments ->write_ctr to force the readers to switch to the
 * slow mode, note the atomic_read() check in update_fast_ctr().
 *
 * After that the readers can only inc/dec the slow ->slow_read_ctr counter,
 * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
 * counter it represents the number of active readers.
 *
 * Finally the writer takes ->rw_sem for writing and blocks the new readers,
 * then waits until the slow counter becomes zero.
 */
void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/* tell update_fast_ctr() there is a pending writer */
	atomic_inc(&brw->write_ctr);
	/*
	 * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
	 *    so that update_fast_ctr() can't succeed.
	 *
	 * 2. Ensures we see the result of every previous this_cpu_add() in
	 *    update_fast_ctr().
	 *
	 * 3. Ensures that if any reader has exited its critical section via
	 *    fast-path, it executes a full memory barrier before we return.
	 *    See R_W case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Insert the barrier before the next fast-path in down_read,
	 * see W_R case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();
	/* the last writer unblocks update_fast_ctr() */
	atomic_dec(&brw->write_ctr);
}
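
/*
 * Writer-side usage sketch (hypothetical caller, not part of this file).
 * The section is exclusive against all readers and other writers, at the
 * cost of one synchronize_sched_expedited() in percpu_down_write() and
 * one more in percpu_up_write().
 */
static void __maybe_unused my_write_section(struct percpu_rw_semaphore *brw)
{
	percpu_down_write(brw);	/* waits for all active readers to drain */

	/* ... exclusive updates, visible to all subsequent readers ... */

	percpu_up_write(brw);
}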