/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <trace/events/rcu.h>

#include "rcu.h"

/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
	b->head = NULL;
	b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
	*b->tail = head;
	b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
	return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
	struct rcu_head *head;

	if (rcu_batch_empty(b))
		return NULL;

	head = b->head;
	b->head = head->next;
	if (b->tail == &head->next)
		rcu_batch_init(b);

	return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
	if (!rcu_batch_empty(from)) {
		*to->tail = from->head;
		to->tail = from->tail;
		rcu_batch_init(from);
	}
}

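/*
 * Illustrative sketch, not built (guarded by #if 0): how the batch
 * primitives above compose.  Callbacks enter at the tail, drain from
 * the head, and whole batches move between pipeline stages in O(1).
 * The identifiers example_cb and rcu_batch_example are hypothetical
 * and not part of this file.
 */
#if 0
static void example_cb(struct rcu_head *rhp)
{
	/* A real callback would typically free the enclosing structure. */
}

static void rcu_batch_example(void)
{
	struct rcu_batch queue, done;
	struct rcu_head a, b;
	struct rcu_head *head;

	rcu_batch_init(&queue);
	rcu_batch_init(&done);
	a.func = example_cb;
	b.func = example_cb;
	rcu_batch_queue(&queue, &a);	/* queue: a */
	rcu_batch_queue(&queue, &b);	/* queue: a, b */
	rcu_batch_move(&done, &queue);	/* done: a, b; queue: empty */
	while ((head = rcu_batch_dequeue(&done)) != NULL)
		head->func(head);	/* callbacks run in FIFO order */
}
#endif
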
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	spin_lock_init(&sp->queue_lock);
	sp->running = false;
	rcu_batch_init(&sp->batch_queue);
	rcu_batch_init(&sp->batch_check0);
	rcu_batch_init(&sp->batch_check1);
	rcu_batch_init(&sp->batch_done);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * Returns approximate total of the readers' ->seq[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Returns approximate number of readers active on the specified rank
 * of the per-CPU ->c[] counters.
 */
static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be stably zero.  An example unstable zero can occur if the call
 * to srcu_readers_active_idx() misses an __srcu_read_lock() increment,
 * but due to task migration, sees the corresponding __srcu_read_unlock()
 * decrement.  This can happen because srcu_readers_active_idx() takes
 * time to sum the array, and might in fact be interrupted or preempted
 * partway through the summation.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long seq;

	seq = srcu_readers_seq_idx(sp, idx);

	/*
	 * The following smp_mb() A pairs with the smp_mb() B located in
	 * __srcu_read_lock().  This pairing ensures that if an
	 * __srcu_read_lock() increments its counter after the summation
	 * in srcu_readers_active_idx(), then the corresponding SRCU read-side
	 * critical section will see any changes made prior to the start
	 * of the current SRCU grace period.
	 *
	 * Also, if the above call to srcu_readers_seq_idx() saw the
	 * increment of ->seq[], then the call to srcu_readers_active_idx()
	 * must see the increment of ->c[].
	 */
	smp_mb(); /* A */

	/*
	 * Note that srcu_readers_active_idx() can incorrectly return
	 * zero even though there is a pre-existing reader throughout.
	 * To see this, suppose that task A is in a very long SRCU
	 * read-side critical section that started on CPU 0, and that
	 * no other reader exists, so that the sum of the counters
	 * is equal to one.  Then suppose that task B starts executing
	 * srcu_readers_active_idx(), summing up to CPU 1, and then that
	 * task C starts reading on CPU 0, so that its increment is not
	 * summed, but finishes reading on CPU 2, so that its decrement
	 * -is- summed.  Then when task B completes its sum, it will
	 * incorrectly get zero, despite the fact that task A has been
	 * in its SRCU read-side critical section the whole time.
	 *
	 * We therefore do a validation step should srcu_readers_active_idx()
	 * return zero.
	 */
	if (srcu_readers_active_idx(sp, idx) != 0)
		return false;

	/*
	 * The remainder of this function is the validation step.
	 * The following smp_mb() D pairs with the smp_mb() C in
	 * __srcu_read_unlock().  If the __srcu_read_unlock() was seen
	 * by srcu_readers_active_idx() above, then any destructive
	 * operation performed after the grace period will happen after
	 * the corresponding SRCU read-side critical section.
	 *
	 * Note that there can be at most NR_CPUS worth of readers using
	 * the old index, which is not enough to overflow even a 32-bit
	 * integer.  (Yes, this does mean that systems having more than
	 * a billion or so CPUs need to be 64-bit systems.)  Therefore,
	 * the sum of the ->seq[] counters cannot possibly overflow.
	 * Therefore, the only way that the return values of the two
	 * calls to srcu_readers_seq_idx() can be equal is if there were
	 * no increments of the corresponding rank of ->seq[] counts
	 * in the interim.  But the missed-increment scenario laid out
	 * above includes an increment of the ->seq[] counter by
	 * the corresponding __srcu_read_lock().  Therefore, if this
	 * scenario occurs, the return values from the two calls to
	 * srcu_readers_seq_idx() will differ, and thus the validation
	 * step below suffices.
	 */
	smp_mb(); /* D */

	return srcu_readers_seq_idx(sp, idx) == seq;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
	}
	return sum;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

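/*
 * Illustrative sketch, not built (guarded by #if 0): the expected
 * srcu_struct lifecycle using the two functions above.  example_srcu,
 * example_init, and example_exit are hypothetical names.
 */
#if 0
static struct srcu_struct example_srcu;

static int example_init(void)
{
	return init_srcu_struct(&example_srcu);	/* -ENOMEM on failure */
}

static void example_exit(void)
{
	/* All readers and queued callbacks must be finished by now. */
	cleanup_srcu_struct(&example_srcu);
}
#endif
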
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = ACCESS_ONCE(sp->completed) & 0x1;
	preempt_disable();
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_dec(sp->per_cpu_ref->c[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

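/*
 * Illustrative sketch, not built (guarded by #if 0): readers normally
 * use the srcu_read_lock()/srcu_read_unlock() wrappers from
 * <linux/srcu.h>, which call the two functions above and thread the
 * returned index through.  struct example, example_srcu, example_ptr,
 * and do_something_with() are hypothetical.
 */
#if 0
static void example_reader(void)
{
	int idx;
	struct example *p;

	idx = srcu_read_lock(&example_srcu);
	p = srcu_dereference(example_ptr, &example_srcu);
	if (p)
		do_something_with(p);	/* may sleep: this is SRCU */
	srcu_read_unlock(&example_srcu, idx);
}
#endif
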
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods.  This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY		5
#define SYNCHRONIZE_SRCU_TRYCOUNT	2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12

/*
 * Wait until all pre-existing readers complete.  Such readers
 * will have used the index specified by "idx".  The caller should
 * ensure that ->completed is not changed while checking, and that
 * idx == (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->c[] and ->seq[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	sp->completed++;
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       void (*func)(struct rcu_head *head))
{
	unsigned long flags;

	head->next = NULL;
	head->func = func;
	spin_lock_irqsave(&sp->queue_lock, flags);
	rcu_batch_queue(&sp->batch_queue, head);
	if (!sp->running) {
		sp->running = true;
		schedule_delayed_work(&sp->work, 0);
	}
	spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);

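/*
 * Illustrative sketch, not built (guarded by #if 0): the usual
 * call_srcu() pattern embeds an rcu_head in the protected structure
 * and frees it from the callback.  struct example, example_srcu, and
 * the helpers below are hypothetical.
 */
#if 0
struct example {
	struct rcu_head rh;
	int data;
};

static void example_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example, rh));
}

static void example_retire(struct example *p)
{
	/* p is freed only after all pre-existing SRCU readers finish. */
	call_srcu(&example_srcu, &p->rh, example_free_cb);
}
#endif
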
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/*
 * Awaken the corresponding synchronize_srcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
	struct rcu_synchronize rcu;
	struct rcu_head *head = &rcu.head;
	bool done = false;

	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
			   !lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

	might_sleep();
	init_completion(&rcu.completion);

	head->next = NULL;
	head->func = wakeme_after_rcu;
	spin_lock_irq(&sp->queue_lock);
	if (!sp->running) {
		/* Steal the processing owner. */
		sp->running = true;
		rcu_batch_queue(&sp->batch_check0, head);
		spin_unlock_irq(&sp->queue_lock);

		srcu_advance_batches(sp, trycount);
		if (!rcu_batch_empty(&sp->batch_done)) {
			BUG_ON(sp->batch_done.head != head);
			rcu_batch_dequeue(&sp->batch_done);
			done = true;
		}
		/* Give the processing owner back to the work_struct. */
		srcu_reschedule(sp);
	} else {
		rcu_batch_queue(&sp->batch_queue, head);
		spin_unlock_irq(&sp->queue_lock);
	}

	if (!done)
		wait_for_completion(&rcu.completion);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * starving synchronize_srcu(), first wait for the count at
 * index = ((->completed & 1) ^ 1) to drain to zero, then flip ->completed
 * and wait for the count at the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_expedited
			   ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
			   : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

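/*
 * Illustrative sketch, not built (guarded by #if 0): the classic
 * update pattern.  Unpublish the old version, wait for pre-existing
 * readers, then reclaim.  example_ptr, example_lock, example_srcu, and
 * struct example are hypothetical.
 */
#if 0
static void example_update(struct example *newp)
{
	struct example *old;

	spin_lock(&example_lock);
	old = rcu_dereference_protected(example_ptr,
					lockdep_is_held(&example_lock));
	rcu_assign_pointer(example_ptr, newp);	/* publish the new version */
	spin_unlock(&example_lock);

	synchronize_srcu(&example_srcu);	/* wait for pre-existing readers */
	kfree(old);	/* no reader can still reference the old version */
}
#endif
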
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

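/*
 * Illustrative sketch, not built (guarded by #if 0): a user of
 * call_srcu() must drain in-flight callbacks before tearing down the
 * srcu_struct.  example_srcu and example_shutdown are hypothetical.
 */
#if 0
static void example_shutdown(void)
{
	srcu_barrier(&example_srcu);		/* wait for queued callbacks */
	cleanup_srcu_struct(&example_srcu);	/* now safe to tear down */
}
#endif
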
/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

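/*
 * Illustrative sketch, not built (guarded by #if 0): rcutorture-style
 * use of srcu_batches_completed(), snapshotting the counter and
 * re-reading it later to detect grace-period progress.
 * example_gp_progressed is a hypothetical name.
 */
#if 0
static bool example_gp_progressed(struct srcu_struct *sp, long snap)
{
	/* A changed value suggests at least one batch has completed. */
	return srcu_batches_completed(sp) != snap;
}
#endif
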
#define SRCU_CALLBACK_BATCH	10
#define SRCU_INTERVAL		1

/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
	if (!rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
		spin_unlock_irq(&sp->queue_lock);
	}
}

/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
	int idx = 1 ^ (sp->completed & 1);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->completed for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 */

	if (rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_check1))
		return; /* no callbacks need to be advanced */

	if (!try_check_zero(sp, idx, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have already passed their first
	 * zero check and the subsequent flip; that happened while they were
	 * on ->batch_check0 during a previous invocation of
	 * srcu_advance_batches().  (Presumably try_check_zero() returned
	 * false during that invocation, leaving the callbacks stranded on
	 * ->batch_check1.)  They are therefore ready to invoke, so move
	 * them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);

	if (rcu_batch_empty(&sp->batch_check0))
		return; /* no callbacks need to be advanced */
	srcu_flip(sp);

	/*
	 * The callbacks in ->batch_check0 just finished their first zero
	 * check and flip, so move them to ->batch_check1 for future
	 * checking on the other idx.
	 */
	rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

	/*
	 * SRCU read-side critical sections are normally short, so check
	 * at least twice in quick succession after a flip.
	 */
	trycount = trycount < 2 ? 2 : trycount;
	if (!try_check_zero(sp, idx^1, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have now waited for all
	 * pre-existing readers using both idx values.  They are therefore
	 * ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
	int i;
	struct rcu_head *head;

	for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
		head = rcu_batch_dequeue(&sp->batch_done);
		if (!head)
			break;
		local_bh_disable();
		head->func(head);
		local_bh_enable();
	}
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
	bool pending = true;

	if (rcu_batch_empty(&sp->batch_done) &&
	    rcu_batch_empty(&sp->batch_check1) &&
	    rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		if (rcu_batch_empty(&sp->batch_done) &&
		    rcu_batch_empty(&sp->batch_check1) &&
		    rcu_batch_empty(&sp->batch_check0) &&
		    rcu_batch_empty(&sp->batch_queue)) {
			sp->running = false;
			pending = false;
		}
		spin_unlock_irq(&sp->queue_lock);
	}

	if (pending)
		schedule_delayed_work(&sp->work, SRCU_INTERVAL);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_collect_new(sp);
	srcu_advance_batches(sp, 1);
	srcu_invoke_callbacks(sp);
	srcu_reschedule(sp);
}
EXPORT_SYMBOL_GPL(process_srcu);