Commit | Line | Data |
---|---|---|
a241ec65 | 1 | /* |
29766f1e | 2 | * Read-Copy Update module-based torture test facility |
a241ec65 PM |
3 | * |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
17 | * | |
b772e1dd | 18 | * Copyright (C) IBM Corporation, 2005, 2006 |
a241ec65 PM |
19 | * |
20 | * Authors: Paul E. McKenney <paulmck@us.ibm.com> | |
a71fca58 | 21 | * Josh Triplett <josh@freedesktop.org> |
a241ec65 PM |
22 | * |
23 | * See also: Documentation/RCU/torture.txt | |
24 | */ | |
25 | #include <linux/types.h> | |
26 | #include <linux/kernel.h> | |
27 | #include <linux/init.h> | |
28 | #include <linux/module.h> | |
29 | #include <linux/kthread.h> | |
30 | #include <linux/err.h> | |
31 | #include <linux/spinlock.h> | |
32 | #include <linux/smp.h> | |
33 | #include <linux/rcupdate.h> | |
34 | #include <linux/interrupt.h> | |
35 | #include <linux/sched.h> | |
60063497 | 36 | #include <linux/atomic.h> |
a241ec65 | 37 | #include <linux/bitops.h> |
a241ec65 PM |
38 | #include <linux/completion.h> |
39 | #include <linux/moduleparam.h> | |
40 | #include <linux/percpu.h> | |
41 | #include <linux/notifier.h> | |
343e9099 | 42 | #include <linux/reboot.h> |
83144186 | 43 | #include <linux/freezer.h> |
a241ec65 | 44 | #include <linux/cpu.h> |
a241ec65 | 45 | #include <linux/delay.h> |
a241ec65 | 46 | #include <linux/stat.h> |
b2896d2e | 47 | #include <linux/srcu.h> |
1aeb272c | 48 | #include <linux/slab.h> |
52494535 | 49 | #include <linux/trace_clock.h> |
f07767fd | 50 | #include <asm/byteorder.h> |
51b1130e | 51 | #include <linux/torture.h> |
a241ec65 PM |
52 | |
53 | MODULE_LICENSE("GPL"); | |
5cf05ad7 | 54 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); |
a241ec65 | 55 | |
4102adab PM |
56 | MODULE_ALIAS("rcutorture"); |
57 | #ifdef MODULE_PARAM_PREFIX | |
58 | #undef MODULE_PARAM_PREFIX | |
59 | #endif | |
60 | #define MODULE_PARAM_PREFIX "rcutorture." | |
61 | ||
9e250225 PM |
62 | torture_param(int, fqs_duration, 0, |
63 | "Duration of fqs bursts (us), 0 to disable"); | |
64 | torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); | |
65 | torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); | |
66 | torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); | |
67 | torture_param(bool, gp_normal, false, | |
68 | "Use normal (non-expedited) GP wait primitives"); | |
69 | torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers"); | |
70 | torture_param(int, n_barrier_cbs, 0, | |
71 | "# of callbacks/kthreads for barrier testing"); | |
72 | torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads"); | |
73 | torture_param(int, nreaders, -1, "Number of RCU reader threads"); | |
74 | torture_param(int, object_debug, 0, | |
75 | "Enable debug-object double call_rcu() testing"); | |
76 | torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); | |
77 | torture_param(int, onoff_interval, 0, | |
78 | "Time between CPU hotplugs (s), 0=disable"); | |
79 | torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); | |
80 | torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); | |
81 | torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); | |
82 | torture_param(int, stall_cpu_holdoff, 10, | |
83 | "Time to wait before starting stall (s)."); | |
84 | torture_param(int, stat_interval, 60, | |
85 | "Number of seconds between stats printk()s"); | |
86 | torture_param(int, stutter, 5, "Number of seconds to run/halt test"); | |
87 | torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); | |
88 | torture_param(int, test_boost_duration, 4, | |
89 | "Duration of each boost test, seconds."); | |
90 | torture_param(int, test_boost_interval, 7, | |
91 | "Interval between boost tests, seconds."); | |
92 | torture_param(bool, test_no_idle_hz, true, | |
93 | "Test support for tickless idle CPUs"); | |
b5daa8f3 PM |
94 | torture_param(bool, verbose, true, |
95 | "Enable verbose debugging printk()s"); | |
9e250225 | 96 | |
b5daa8f3 | 97 | static char *torture_type = "rcu"; |
d6ad6711 | 98 | module_param(torture_type, charp, 0444); |
d10453e9 | 99 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)"); |
72e9bb54 | 100 | |
a241ec65 PM |
101 | static int nrealreaders; |
102 | static struct task_struct *writer_task; | |
b772e1dd | 103 | static struct task_struct **fakewriter_tasks; |
a241ec65 PM |
104 | static struct task_struct **reader_tasks; |
105 | static struct task_struct *stats_task; | |
bf66f18e | 106 | static struct task_struct *fqs_task; |
8e8be45e | 107 | static struct task_struct *boost_tasks[NR_CPUS]; |
c13f3757 | 108 | static struct task_struct *stall_task; |
fae4b54f PM |
109 | static struct task_struct **barrier_cbs_tasks; |
110 | static struct task_struct *barrier_task; | |
a241ec65 PM |
111 | |
112 | #define RCU_TORTURE_PIPE_LEN 10 | |
113 | ||
114 | struct rcu_torture { | |
115 | struct rcu_head rtort_rcu; | |
116 | int rtort_pipe_count; | |
117 | struct list_head rtort_free; | |
996417d2 | 118 | int rtort_mbtest; |
a241ec65 PM |
119 | }; |
120 | ||
a241ec65 | 121 | static LIST_HEAD(rcu_torture_freelist); |
0ddea0ea | 122 | static struct rcu_torture __rcu *rcu_torture_current; |
4a298656 | 123 | static unsigned long rcu_torture_current_version; |
a241ec65 PM |
124 | static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; |
125 | static DEFINE_SPINLOCK(rcu_torture_lock); | |
806274c0 PM |
126 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], |
127 | rcu_torture_count) = { 0 }; | |
128 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], | |
129 | rcu_torture_batch) = { 0 }; | |
a241ec65 | 130 | static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; |
b2896d2e PM |
131 | static atomic_t n_rcu_torture_alloc; |
132 | static atomic_t n_rcu_torture_alloc_fail; | |
133 | static atomic_t n_rcu_torture_free; | |
134 | static atomic_t n_rcu_torture_mberror; | |
135 | static atomic_t n_rcu_torture_error; | |
fae4b54f | 136 | static long n_rcu_torture_barrier_error; |
8e8be45e PM |
137 | static long n_rcu_torture_boost_ktrerror; |
138 | static long n_rcu_torture_boost_rterror; | |
8e8be45e PM |
139 | static long n_rcu_torture_boost_failure; |
140 | static long n_rcu_torture_boosts; | |
a71fca58 | 141 | static long n_rcu_torture_timers; |
fae4b54f PM |
142 | static long n_barrier_attempts; |
143 | static long n_barrier_successes; | |
e3033736 | 144 | static struct list_head rcu_torture_removed; |
a241ec65 | 145 | |
31a72bce PM |
146 | #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) |
147 | #define RCUTORTURE_RUNNABLE_INIT 1 | |
148 | #else | |
149 | #define RCUTORTURE_RUNNABLE_INIT 0 | |
150 | #endif | |
151 | int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; | |
bb3bf705 PM |
152 | module_param(rcutorture_runnable, int, 0444); |
153 | MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot"); | |
31a72bce | 154 | |
3acf4a9a | 155 | #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) |
8e8be45e | 156 | #define rcu_can_boost() 1 |
3acf4a9a | 157 | #else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ |
8e8be45e | 158 | #define rcu_can_boost() 0 |
3acf4a9a | 159 | #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ |
8e8be45e | 160 | |
e4aa0da3 SR |
161 | #ifdef CONFIG_RCU_TRACE |
162 | static u64 notrace rcu_trace_clock_local(void) | |
163 | { | |
164 | u64 ts = trace_clock_local(); | |
165 | unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC); | |
166 | return ts; | |
167 | } | |
168 | #else /* #ifdef CONFIG_RCU_TRACE */ | |
169 | static u64 notrace rcu_trace_clock_local(void) | |
170 | { | |
171 | return 0ULL; | |
172 | } | |
173 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ | |
174 | ||
8e8be45e PM |
175 | static unsigned long boost_starttime; /* jiffies of next boost test start. */ |
176 | DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ | |
177 | /* and boost task create/destroy. */ | |
fae4b54f | 178 | static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ |
c6ebcbb6 | 179 | static bool barrier_phase; /* Test phase. */ |
fae4b54f PM |
180 | static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ |
181 | static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ | |
182 | static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); | |
8e8be45e | 183 | |
a241ec65 PM |
184 | /* |
185 | * Allocate an element from the rcu_tortures pool. | |
186 | */ | |
97a41e26 | 187 | static struct rcu_torture * |
a241ec65 PM |
188 | rcu_torture_alloc(void) |
189 | { | |
190 | struct list_head *p; | |
191 | ||
adac1665 | 192 | spin_lock_bh(&rcu_torture_lock); |
a241ec65 PM |
193 | if (list_empty(&rcu_torture_freelist)) { |
194 | atomic_inc(&n_rcu_torture_alloc_fail); | |
adac1665 | 195 | spin_unlock_bh(&rcu_torture_lock); |
a241ec65 PM |
196 | return NULL; |
197 | } | |
198 | atomic_inc(&n_rcu_torture_alloc); | |
199 | p = rcu_torture_freelist.next; | |
200 | list_del_init(p); | |
adac1665 | 201 | spin_unlock_bh(&rcu_torture_lock); |
a241ec65 PM |
202 | return container_of(p, struct rcu_torture, rtort_free); |
203 | } | |
204 | ||
205 | /* | |
206 | * Free an element to the rcu_tortures pool. | |
207 | */ | |
208 | static void | |
209 | rcu_torture_free(struct rcu_torture *p) | |
210 | { | |
211 | atomic_inc(&n_rcu_torture_free); | |
adac1665 | 212 | spin_lock_bh(&rcu_torture_lock); |
a241ec65 | 213 | list_add_tail(&p->rtort_free, &rcu_torture_freelist); |
adac1665 | 214 | spin_unlock_bh(&rcu_torture_lock); |
a241ec65 PM |
215 | } |
216 | ||
72e9bb54 PM |
217 | /* |
218 | * Operations vector for selecting different types of tests. | |
219 | */ | |
220 | ||
221 | struct rcu_torture_ops { | |
222 | void (*init)(void); | |
72e9bb54 | 223 | int (*readlock)(void); |
51b1130e | 224 | void (*read_delay)(struct torture_random_state *rrsp); |
72e9bb54 PM |
225 | void (*readunlock)(int idx); |
226 | int (*completed)(void); | |
0acc512c | 227 | void (*deferred_free)(struct rcu_torture *p); |
b772e1dd | 228 | void (*sync)(void); |
2ec1f2d9 | 229 | void (*exp_sync)(void); |
fae4b54f | 230 | void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); |
2326974d | 231 | void (*cb_barrier)(void); |
bf66f18e | 232 | void (*fqs)(void); |
d1008950 | 233 | void (*stats)(char *page); |
0acc512c | 234 | int irq_capable; |
8e8be45e | 235 | int can_boost; |
e66c33d5 | 236 | const char *name; |
72e9bb54 | 237 | }; |
a71fca58 PM |
238 | |
239 | static struct rcu_torture_ops *cur_ops; | |
72e9bb54 PM |
240 | |
241 | /* | |
242 | * Definitions for rcu torture testing. | |
243 | */ | |
244 | ||
a49a4af7 | 245 | static int rcu_torture_read_lock(void) __acquires(RCU) |
72e9bb54 PM |
246 | { |
247 | rcu_read_lock(); | |
248 | return 0; | |
249 | } | |
250 | ||
51b1130e | 251 | static void rcu_read_delay(struct torture_random_state *rrsp) |
b2896d2e | 252 | { |
b8d57a76 JT |
253 | const unsigned long shortdelay_us = 200; |
254 | const unsigned long longdelay_ms = 50; | |
b2896d2e | 255 | |
b8d57a76 JT |
256 | /* We want a short delay sometimes to make a reader delay the grace |
257 | * period, and we want a long delay occasionally to trigger | |
258 | * force_quiescent_state. */ | |
b2896d2e | 259 | |
51b1130e | 260 | if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) |
b8d57a76 | 261 | mdelay(longdelay_ms); |
51b1130e | 262 | if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) |
b8d57a76 | 263 | udelay(shortdelay_us); |
e546f485 | 264 | #ifdef CONFIG_PREEMPT |
51b1130e PM |
265 | if (!preempt_count() && |
266 | !(torture_random(rrsp) % (nrealreaders * 20000))) | |
e546f485 LJ |
267 | preempt_schedule(); /* No QS if preempt_disable() in effect */ |
268 | #endif | |
b2896d2e PM |
269 | } |
270 | ||
a49a4af7 | 271 | static void rcu_torture_read_unlock(int idx) __releases(RCU) |
72e9bb54 PM |
272 | { |
273 | rcu_read_unlock(); | |
274 | } | |
275 | ||
276 | static int rcu_torture_completed(void) | |
277 | { | |
278 | return rcu_batches_completed(); | |
279 | } | |
280 | ||
281 | static void | |
282 | rcu_torture_cb(struct rcu_head *p) | |
283 | { | |
284 | int i; | |
285 | struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); | |
286 | ||
36970bb9 | 287 | if (torture_must_stop_irq()) { |
72e9bb54 PM |
288 | /* Test is ending, just drop callbacks on the floor. */ |
289 | /* The next initialization will pick up the pieces. */ | |
290 | return; | |
291 | } | |
292 | i = rp->rtort_pipe_count; | |
293 | if (i > RCU_TORTURE_PIPE_LEN) | |
294 | i = RCU_TORTURE_PIPE_LEN; | |
295 | atomic_inc(&rcu_torture_wcount[i]); | |
296 | if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { | |
297 | rp->rtort_mbtest = 0; | |
298 | rcu_torture_free(rp); | |
c701d5d9 | 299 | } else { |
0acc512c | 300 | cur_ops->deferred_free(rp); |
c701d5d9 | 301 | } |
72e9bb54 PM |
302 | } |
303 | ||
d9a3da06 PM |
304 | static int rcu_no_completed(void) |
305 | { | |
306 | return 0; | |
307 | } | |
308 | ||
72e9bb54 PM |
309 | static void rcu_torture_deferred_free(struct rcu_torture *p) |
310 | { | |
311 | call_rcu(&p->rtort_rcu, rcu_torture_cb); | |
312 | } | |
313 | ||
e3033736 JT |
314 | static void rcu_sync_torture_init(void) |
315 | { | |
316 | INIT_LIST_HEAD(&rcu_torture_removed); | |
317 | } | |
318 | ||
2ec1f2d9 | 319 | static struct rcu_torture_ops rcu_ops = { |
0acc512c | 320 | .init = rcu_sync_torture_init, |
0acc512c PM |
321 | .readlock = rcu_torture_read_lock, |
322 | .read_delay = rcu_read_delay, | |
323 | .readunlock = rcu_torture_read_unlock, | |
324 | .completed = rcu_torture_completed, | |
2ec1f2d9 | 325 | .deferred_free = rcu_torture_deferred_free, |
0acc512c | 326 | .sync = synchronize_rcu, |
2ec1f2d9 PM |
327 | .exp_sync = synchronize_rcu_expedited, |
328 | .call = call_rcu, | |
329 | .cb_barrier = rcu_barrier, | |
bf66f18e | 330 | .fqs = rcu_force_quiescent_state, |
d9a3da06 PM |
331 | .stats = NULL, |
332 | .irq_capable = 1, | |
8e8be45e | 333 | .can_boost = rcu_can_boost(), |
2ec1f2d9 | 334 | .name = "rcu" |
d9a3da06 PM |
335 | }; |
336 | ||
c32e0660 PM |
337 | /* |
338 | * Definitions for rcu_bh torture testing. | |
339 | */ | |
340 | ||
a49a4af7 | 341 | static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH) |
c32e0660 PM |
342 | { |
343 | rcu_read_lock_bh(); | |
344 | return 0; | |
345 | } | |
346 | ||
a49a4af7 | 347 | static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH) |
c32e0660 PM |
348 | { |
349 | rcu_read_unlock_bh(); | |
350 | } | |
351 | ||
352 | static int rcu_bh_torture_completed(void) | |
353 | { | |
354 | return rcu_batches_completed_bh(); | |
355 | } | |
356 | ||
357 | static void rcu_bh_torture_deferred_free(struct rcu_torture *p) | |
358 | { | |
359 | call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); | |
360 | } | |
361 | ||
362 | static struct rcu_torture_ops rcu_bh_ops = { | |
2ec1f2d9 | 363 | .init = rcu_sync_torture_init, |
0acc512c PM |
364 | .readlock = rcu_bh_torture_read_lock, |
365 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | |
366 | .readunlock = rcu_bh_torture_read_unlock, | |
367 | .completed = rcu_bh_torture_completed, | |
368 | .deferred_free = rcu_bh_torture_deferred_free, | |
bdf2a436 | 369 | .sync = synchronize_rcu_bh, |
2ec1f2d9 | 370 | .exp_sync = synchronize_rcu_bh_expedited, |
fae4b54f | 371 | .call = call_rcu_bh, |
0acc512c | 372 | .cb_barrier = rcu_barrier_bh, |
bf66f18e | 373 | .fqs = rcu_bh_force_quiescent_state, |
0acc512c PM |
374 | .stats = NULL, |
375 | .irq_capable = 1, | |
376 | .name = "rcu_bh" | |
c32e0660 PM |
377 | }; |
378 | ||
b2896d2e PM |
379 | /* |
380 | * Definitions for srcu torture testing. | |
381 | */ | |
382 | ||
cda4dc81 | 383 | DEFINE_STATIC_SRCU(srcu_ctl); |
b2896d2e | 384 | |
012d3ca8 | 385 | static int srcu_torture_read_lock(void) __acquires(&srcu_ctl) |
b2896d2e PM |
386 | { |
387 | return srcu_read_lock(&srcu_ctl); | |
388 | } | |
389 | ||
51b1130e | 390 | static void srcu_read_delay(struct torture_random_state *rrsp) |
b2896d2e PM |
391 | { |
392 | long delay; | |
393 | const long uspertick = 1000000 / HZ; | |
394 | const long longdelay = 10; | |
395 | ||
396 | /* We want there to be long-running readers, but not all the time. */ | |
397 | ||
51b1130e PM |
398 | delay = torture_random(rrsp) % |
399 | (nrealreaders * 2 * longdelay * uspertick); | |
b2896d2e PM |
400 | if (!delay) |
401 | schedule_timeout_interruptible(longdelay); | |
e546f485 LJ |
402 | else |
403 | rcu_read_delay(rrsp); | |
b2896d2e PM |
404 | } |
405 | ||
012d3ca8 | 406 | static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl) |
b2896d2e PM |
407 | { |
408 | srcu_read_unlock(&srcu_ctl, idx); | |
409 | } | |
410 | ||
411 | static int srcu_torture_completed(void) | |
412 | { | |
413 | return srcu_batches_completed(&srcu_ctl); | |
414 | } | |
415 | ||
9059c940 LJ |
416 | static void srcu_torture_deferred_free(struct rcu_torture *rp) |
417 | { | |
418 | call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb); | |
419 | } | |
420 | ||
b772e1dd JT |
421 | static void srcu_torture_synchronize(void) |
422 | { | |
423 | synchronize_srcu(&srcu_ctl); | |
424 | } | |
425 | ||
e3f8d378 PM |
426 | static void srcu_torture_call(struct rcu_head *head, |
427 | void (*func)(struct rcu_head *head)) | |
428 | { | |
429 | call_srcu(&srcu_ctl, head, func); | |
430 | } | |
431 | ||
432 | static void srcu_torture_barrier(void) | |
433 | { | |
434 | srcu_barrier(&srcu_ctl); | |
435 | } | |
436 | ||
d1008950 | 437 | static void srcu_torture_stats(char *page) |
b2896d2e | 438 | { |
b2896d2e PM |
439 | int cpu; |
440 | int idx = srcu_ctl.completed & 0x1; | |
441 | ||
d1008950 | 442 | page += sprintf(page, "%s%s per-CPU(idx=%d):", |
b2896d2e PM |
443 | torture_type, TORTURE_FLAG, idx); |
444 | for_each_possible_cpu(cpu) { | |
d1008950 | 445 | page += sprintf(page, " %d(%lu,%lu)", cpu, |
b2896d2e PM |
446 | per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx], |
447 | per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]); | |
448 | } | |
d1008950 | 449 | sprintf(page, "\n"); |
b2896d2e PM |
450 | } |
451 | ||
2ec1f2d9 PM |
452 | static void srcu_torture_synchronize_expedited(void) |
453 | { | |
454 | synchronize_srcu_expedited(&srcu_ctl); | |
455 | } | |
456 | ||
b2896d2e | 457 | static struct rcu_torture_ops srcu_ops = { |
cda4dc81 | 458 | .init = rcu_sync_torture_init, |
0acc512c PM |
459 | .readlock = srcu_torture_read_lock, |
460 | .read_delay = srcu_read_delay, | |
461 | .readunlock = srcu_torture_read_unlock, | |
462 | .completed = srcu_torture_completed, | |
9059c940 | 463 | .deferred_free = srcu_torture_deferred_free, |
0acc512c | 464 | .sync = srcu_torture_synchronize, |
2ec1f2d9 | 465 | .exp_sync = srcu_torture_synchronize_expedited, |
e3f8d378 PM |
466 | .call = srcu_torture_call, |
467 | .cb_barrier = srcu_torture_barrier, | |
0acc512c PM |
468 | .stats = srcu_torture_stats, |
469 | .name = "srcu" | |
b2896d2e PM |
470 | }; |
471 | ||
4b6c2cca JT |
472 | /* |
473 | * Definitions for sched torture testing. | |
474 | */ | |
475 | ||
476 | static int sched_torture_read_lock(void) | |
477 | { | |
478 | preempt_disable(); | |
479 | return 0; | |
480 | } | |
481 | ||
482 | static void sched_torture_read_unlock(int idx) | |
483 | { | |
484 | preempt_enable(); | |
485 | } | |
486 | ||
2326974d PM |
487 | static void rcu_sched_torture_deferred_free(struct rcu_torture *p) |
488 | { | |
489 | call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); | |
490 | } | |
491 | ||
4b6c2cca | 492 | static struct rcu_torture_ops sched_ops = { |
0acc512c | 493 | .init = rcu_sync_torture_init, |
0acc512c PM |
494 | .readlock = sched_torture_read_lock, |
495 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | |
496 | .readunlock = sched_torture_read_unlock, | |
d9a3da06 | 497 | .completed = rcu_no_completed, |
0acc512c | 498 | .deferred_free = rcu_sched_torture_deferred_free, |
bdf2a436 | 499 | .sync = synchronize_sched, |
2ec1f2d9 PM |
500 | .exp_sync = synchronize_sched_expedited, |
501 | .call = call_rcu_sched, | |
0acc512c | 502 | .cb_barrier = rcu_barrier_sched, |
bf66f18e | 503 | .fqs = rcu_sched_force_quiescent_state, |
0acc512c PM |
504 | .stats = NULL, |
505 | .irq_capable = 1, | |
506 | .name = "sched" | |
4b6c2cca JT |
507 | }; |
508 | ||
8e8be45e PM |
509 | /* |
510 | * RCU torture priority-boost testing. Runs one real-time thread per | |
511 | * CPU for moderate bursts, repeatedly registering RCU callbacks and | |
512 | * spinning waiting for them to be invoked. If a given callback takes | |
513 | * too long to be invoked, we assume that priority inversion has occurred. | |
514 | */ | |
515 | ||
516 | struct rcu_boost_inflight { | |
517 | struct rcu_head rcu; | |
518 | int inflight; | |
519 | }; | |
520 | ||
521 | static void rcu_torture_boost_cb(struct rcu_head *head) | |
522 | { | |
523 | struct rcu_boost_inflight *rbip = | |
524 | container_of(head, struct rcu_boost_inflight, rcu); | |
525 | ||
526 | smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */ | |
527 | rbip->inflight = 0; | |
528 | } | |
529 | ||
530 | static int rcu_torture_boost(void *arg) | |
531 | { | |
532 | unsigned long call_rcu_time; | |
533 | unsigned long endtime; | |
534 | unsigned long oldstarttime; | |
535 | struct rcu_boost_inflight rbi = { .inflight = 0 }; | |
536 | struct sched_param sp; | |
537 | ||
5ccf60f2 | 538 | VERBOSE_TOROUT_STRING("rcu_torture_boost started"); |
8e8be45e PM |
539 | |
540 | /* Set real-time priority. */ | |
541 | sp.sched_priority = 1; | |
542 | if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) { | |
5ccf60f2 | 543 | VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!"); |
8e8be45e PM |
544 | n_rcu_torture_boost_rterror++; |
545 | } | |
546 | ||
561190e3 | 547 | init_rcu_head_on_stack(&rbi.rcu); |
8e8be45e PM |
548 | /* Each pass through the following loop does one boost-test cycle. */ |
549 | do { | |
550 | /* Wait for the next test interval. */ | |
551 | oldstarttime = boost_starttime; | |
93898fb1 | 552 | while (ULONG_CMP_LT(jiffies, oldstarttime)) { |
0e11c8e8 | 553 | schedule_timeout_interruptible(oldstarttime - jiffies); |
628edaa5 | 554 | stutter_wait("rcu_torture_boost"); |
36970bb9 | 555 | if (torture_must_stop()) |
8e8be45e PM |
556 | goto checkwait; |
557 | } | |
558 | ||
559 | /* Do one boost-test interval. */ | |
560 | endtime = oldstarttime + test_boost_duration * HZ; | |
561 | call_rcu_time = jiffies; | |
93898fb1 | 562 | while (ULONG_CMP_LT(jiffies, endtime)) { |
8e8be45e PM |
563 | /* If we don't have a callback in flight, post one. */ |
564 | if (!rbi.inflight) { | |
565 | smp_mb(); /* RCU core before ->inflight = 1. */ | |
566 | rbi.inflight = 1; | |
567 | call_rcu(&rbi.rcu, rcu_torture_boost_cb); | |
568 | if (jiffies - call_rcu_time > | |
569 | test_boost_duration * HZ - HZ / 2) { | |
5ccf60f2 | 570 | VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); |
8e8be45e PM |
571 | n_rcu_torture_boost_failure++; |
572 | } | |
573 | call_rcu_time = jiffies; | |
574 | } | |
575 | cond_resched(); | |
628edaa5 | 576 | stutter_wait("rcu_torture_boost"); |
36970bb9 | 577 | if (torture_must_stop()) |
8e8be45e PM |
578 | goto checkwait; |
579 | } | |
580 | ||
581 | /* | |
582 | * Set the start time of the next test interval. | |
583 | * Yes, this is vulnerable to long delays, but such | |
584 | * delays simply cause a false negative for the next | |
585 | * interval. Besides, we are running at RT priority, | |
586 | * so delays should be relatively rare. | |
587 | */ | |
ab8f11e5 PM |
588 | while (oldstarttime == boost_starttime && |
589 | !kthread_should_stop()) { | |
8e8be45e PM |
590 | if (mutex_trylock(&boost_mutex)) { |
591 | boost_starttime = jiffies + | |
592 | test_boost_interval * HZ; | |
593 | n_rcu_torture_boosts++; | |
594 | mutex_unlock(&boost_mutex); | |
595 | break; | |
596 | } | |
597 | schedule_timeout_uninterruptible(1); | |
598 | } | |
599 | ||
600 | /* Go do the stutter. */ | |
628edaa5 | 601 | checkwait: stutter_wait("rcu_torture_boost"); |
36970bb9 | 602 | } while (!torture_must_stop()); |
8e8be45e PM |
603 | |
604 | /* Clean up and exit. */ | |
7fafaac5 PM |
605 | while (!kthread_should_stop() || rbi.inflight) { |
606 | torture_shutdown_absorb("rcu_torture_boost"); | |
8e8be45e | 607 | schedule_timeout_uninterruptible(1); |
7fafaac5 | 608 | } |
8e8be45e | 609 | smp_mb(); /* order accesses to ->inflight before stack-frame death. */ |
9d68197c | 610 | destroy_rcu_head_on_stack(&rbi.rcu); |
7fafaac5 | 611 | torture_kthread_stopping("rcu_torture_boost"); |
8e8be45e PM |
612 | return 0; |
613 | } | |
614 | ||
bf66f18e PM |
615 | /* |
616 | * RCU torture force-quiescent-state kthread. Repeatedly induces | |
617 | * bursts of calls to force_quiescent_state(), increasing the probability | |
618 | * of occurrence of some important types of race conditions. | |
619 | */ | |
620 | static int | |
621 | rcu_torture_fqs(void *arg) | |
622 | { | |
623 | unsigned long fqs_resume_time; | |
624 | int fqs_burst_remaining; | |
625 | ||
5ccf60f2 | 626 | VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); |
bf66f18e PM |
627 | do { |
628 | fqs_resume_time = jiffies + fqs_stutter * HZ; | |
93898fb1 PM |
629 | while (ULONG_CMP_LT(jiffies, fqs_resume_time) && |
630 | !kthread_should_stop()) { | |
bf66f18e PM |
631 | schedule_timeout_interruptible(1); |
632 | } | |
633 | fqs_burst_remaining = fqs_duration; | |
93898fb1 PM |
634 | while (fqs_burst_remaining > 0 && |
635 | !kthread_should_stop()) { | |
bf66f18e PM |
636 | cur_ops->fqs(); |
637 | udelay(fqs_holdoff); | |
638 | fqs_burst_remaining -= fqs_holdoff; | |
639 | } | |
628edaa5 | 640 | stutter_wait("rcu_torture_fqs"); |
36970bb9 | 641 | } while (!torture_must_stop()); |
7fafaac5 | 642 | torture_kthread_stopping("rcu_torture_fqs"); |
bf66f18e PM |
643 | return 0; |
644 | } | |
645 | ||
a241ec65 PM |
646 | /* |
647 | * RCU torture writer kthread. Repeatedly substitutes a new structure | |
648 | * for that pointed to by rcu_torture_current, freeing the old structure | |
649 | * after a series of grace periods (the "pipeline"). | |
650 | */ | |
651 | static int | |
652 | rcu_torture_writer(void *arg) | |
653 | { | |
2ec1f2d9 | 654 | bool exp; |
a241ec65 | 655 | int i; |
a241ec65 | 656 | struct rcu_torture *rp; |
2ec1f2d9 | 657 | struct rcu_torture *rp1; |
a241ec65 | 658 | struct rcu_torture *old_rp; |
51b1130e | 659 | static DEFINE_TORTURE_RANDOM(rand); |
a241ec65 | 660 | |
5ccf60f2 | 661 | VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); |
dbdf65b1 IM |
662 | set_user_nice(current, 19); |
663 | ||
a241ec65 PM |
664 | do { |
665 | schedule_timeout_uninterruptible(1); | |
a71fca58 PM |
666 | rp = rcu_torture_alloc(); |
667 | if (rp == NULL) | |
a241ec65 PM |
668 | continue; |
669 | rp->rtort_pipe_count = 0; | |
51b1130e | 670 | udelay(torture_random(&rand) & 0x3ff); |
0ddea0ea PM |
671 | old_rp = rcu_dereference_check(rcu_torture_current, |
672 | current == writer_task); | |
996417d2 | 673 | rp->rtort_mbtest = 1; |
a241ec65 | 674 | rcu_assign_pointer(rcu_torture_current, rp); |
9b2619af | 675 | smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ |
c8e5b163 | 676 | if (old_rp) { |
a241ec65 PM |
677 | i = old_rp->rtort_pipe_count; |
678 | if (i > RCU_TORTURE_PIPE_LEN) | |
679 | i = RCU_TORTURE_PIPE_LEN; | |
680 | atomic_inc(&rcu_torture_wcount[i]); | |
681 | old_rp->rtort_pipe_count++; | |
2ec1f2d9 | 682 | if (gp_normal == gp_exp) |
51b1130e | 683 | exp = !!(torture_random(&rand) & 0x80); |
2ec1f2d9 PM |
684 | else |
685 | exp = gp_exp; | |
686 | if (!exp) { | |
687 | cur_ops->deferred_free(old_rp); | |
688 | } else { | |
689 | cur_ops->exp_sync(); | |
690 | list_add(&old_rp->rtort_free, | |
691 | &rcu_torture_removed); | |
692 | list_for_each_entry_safe(rp, rp1, | |
693 | &rcu_torture_removed, | |
694 | rtort_free) { | |
695 | i = rp->rtort_pipe_count; | |
696 | if (i > RCU_TORTURE_PIPE_LEN) | |
697 | i = RCU_TORTURE_PIPE_LEN; | |
698 | atomic_inc(&rcu_torture_wcount[i]); | |
699 | if (++rp->rtort_pipe_count >= | |
700 | RCU_TORTURE_PIPE_LEN) { | |
701 | rp->rtort_mbtest = 0; | |
702 | list_del(&rp->rtort_free); | |
703 | rcu_torture_free(rp); | |
704 | } | |
705 | } | |
706 | } | |
a241ec65 | 707 | } |
4a298656 | 708 | rcutorture_record_progress(++rcu_torture_current_version); |
628edaa5 | 709 | stutter_wait("rcu_torture_writer"); |
36970bb9 | 710 | } while (!torture_must_stop()); |
7fafaac5 | 711 | torture_kthread_stopping("rcu_torture_writer"); |
a241ec65 PM |
712 | return 0; |
713 | } | |
714 | ||
b772e1dd JT |
715 | /* |
716 | * RCU torture fake writer kthread. Repeatedly calls sync, with a random | |
717 | * delay between calls. | |
718 | */ | |
719 | static int | |
720 | rcu_torture_fakewriter(void *arg) | |
721 | { | |
51b1130e | 722 | DEFINE_TORTURE_RANDOM(rand); |
b772e1dd | 723 | |
5ccf60f2 | 724 | VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); |
b772e1dd JT |
725 | set_user_nice(current, 19); |
726 | ||
727 | do { | |
51b1130e PM |
728 | schedule_timeout_uninterruptible(1 + torture_random(&rand)%10); |
729 | udelay(torture_random(&rand) & 0x3ff); | |
72472a02 | 730 | if (cur_ops->cb_barrier != NULL && |
51b1130e | 731 | torture_random(&rand) % (nfakewriters * 8) == 0) { |
72472a02 | 732 | cur_ops->cb_barrier(); |
2ec1f2d9 | 733 | } else if (gp_normal == gp_exp) { |
51b1130e | 734 | if (torture_random(&rand) & 0x80) |
2ec1f2d9 PM |
735 | cur_ops->sync(); |
736 | else | |
737 | cur_ops->exp_sync(); | |
738 | } else if (gp_normal) { | |
72472a02 | 739 | cur_ops->sync(); |
2ec1f2d9 PM |
740 | } else { |
741 | cur_ops->exp_sync(); | |
742 | } | |
628edaa5 | 743 | stutter_wait("rcu_torture_fakewriter"); |
36970bb9 | 744 | } while (!torture_must_stop()); |
b772e1dd | 745 | |
7fafaac5 | 746 | torture_kthread_stopping("rcu_torture_fakewriter"); |
b772e1dd JT |
747 | return 0; |
748 | } | |
749 | ||
91afaf30 PM |
750 | void rcutorture_trace_dump(void) |
751 | { | |
752 | static atomic_t beenhere = ATOMIC_INIT(0); | |
753 | ||
754 | if (atomic_read(&beenhere)) | |
755 | return; | |
756 | if (atomic_xchg(&beenhere, 1) != 0) | |
757 | return; | |
91afaf30 PM |
758 | ftrace_dump(DUMP_ALL); |
759 | } | |
760 | ||
/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;		/* Grace-period count at read-side entry. */
	int completed_end;	/* Grace-period count near read-side exit. */
	static DEFINE_TORTURE_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);	/* Serializes the shared PRNG. */
	struct rcu_torture *p;
	int pipe_count;
	unsigned long long ts;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	ts = rcu_trace_clock_local();	/* Timestamp for the trace event below. */
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);	/* Ordering failure seen. */
	spin_lock(&rand_lock);		/* rand and the counter are static/shared. */
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed_end = cur_ops->completed();
	if (pipe_count > 1) {
		/* Element outlived a grace period we read across: diagnose. */
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
					  completed, completed_end);
		rcutorture_trace_dump();
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = completed_end - completed;	/* GPs elapsed during this read. */
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}
818 | ||
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;		/* GP count at read-side entry. */
	int completed_end;	/* GP count near read-side exit. */
	int idx;
	DEFINE_TORTURE_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;	/* Fires rcu_torture_timer() when irq-capable. */
	unsigned long long ts;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			/* Keep re-arming so reads also happen in irq context. */
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		ts = rcu_trace_clock_local();	/* Timestamp for tracing. */
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		completed_end = cur_ops->completed();
		if (pipe_count > 1) {
			/* Element seen too late: trace and dump for diagnosis. */
			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
						  ts, completed, completed_end);
			rcutorture_trace_dump();
		}
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = completed_end - completed;	/* GPs elapsed in-read. */
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);	/* Timer references on-stack t. */
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}
892 | ||
/*
 * Create an RCU-torture statistics message in the specified buffer.
 * The caller supplies a buffer it has sized itself (see
 * rcu_torture_stats_print()); no bounds checking is done here.
 */
static void
rcu_torture_printk(char *page)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	/* Sum the per-CPU pipeline and batch counters. */
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	/* Find the highest nonzero pipeline bucket; i > 1 below means trouble. */
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page,
		       "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free));
	page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_boost_ktrerror,
		       n_rcu_torture_boost_rterror);
	page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
		       n_rcu_torture_boost_failure,
		       n_rcu_torture_boosts,
		       n_rcu_torture_timers);
	page = torture_onoff_stats(page);
	page += sprintf(page, "barrier: %ld/%ld:%ld",
		       n_barrier_successes,
		       n_barrier_attempts,
		       n_rcu_torture_barrier_error);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	/* Flag any error condition with a grep-able "!!!" marker. */
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		page += sprintf(page, "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	page += sprintf(page, "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		page += sprintf(page, " %ld", pipesummary[i]);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page, "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		page += sprintf(page, " %ld", batchsummary[i]);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page, "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		page += sprintf(page, " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	page += sprintf(page, "\n");
	if (cur_ops->stats)
		cur_ops->stats(page);	/* Flavor-specific stats, if any. */
}
964 | ||
965 | /* | |
966 | * Print torture statistics. Caller must ensure that there is only | |
967 | * one call to this function at a given time!!! This is normally | |
968 | * accomplished by relying on the module system to only have one copy | |
969 | * of the module loaded, and then by giving the rcu_torture_stats | |
970 | * kthread full control (or the init/cleanup functions when rcu_torture_stats | |
971 | * thread is not running). | |
972 | */ | |
973 | static void | |
974 | rcu_torture_stats_print(void) | |
975 | { | |
d1008950 CG |
976 | int size = nr_cpu_ids * 200 + 8192; |
977 | char *buf; | |
a241ec65 | 978 | |
d1008950 CG |
979 | buf = kmalloc(size, GFP_KERNEL); |
980 | if (!buf) { | |
981 | pr_err("rcu-torture: Out of memory, need: %d", size); | |
982 | return; | |
983 | } | |
984 | rcu_torture_printk(buf); | |
985 | pr_alert("%s", buf); | |
986 | kfree(buf); | |
a241ec65 PM |
987 | } |
988 | ||
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.  Runs as a
 * kthread until torture_must_stop() says otherwise.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}
1005 | ||
/*
 * Print this run's module parameters in a single pr_alert() (so the
 * line cannot be interleaved with other console output), prefixed
 * with @tag, e.g. "Start of test" or one of the "End of test" verdicts.
 */
static inline void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}
1028 | ||
8e8be45e PM |
1029 | static void rcutorture_booster_cleanup(int cpu) |
1030 | { | |
1031 | struct task_struct *t; | |
1032 | ||
1033 | if (boost_tasks[cpu] == NULL) | |
1034 | return; | |
1035 | mutex_lock(&boost_mutex); | |
5ccf60f2 | 1036 | VERBOSE_TOROUT_STRING("Stopping rcu_torture_boost task"); |
8e8be45e PM |
1037 | t = boost_tasks[cpu]; |
1038 | boost_tasks[cpu] = NULL; | |
1039 | mutex_unlock(&boost_mutex); | |
1040 | ||
1041 | /* This must be outside of the mutex, otherwise deadlock! */ | |
1042 | kthread_stop(t); | |
37e377d2 | 1043 | boost_tasks[cpu] = NULL; |
8e8be45e PM |
1044 | } |
1045 | ||
/*
 * Create and bind an RCU-boost kthread for @cpu unless one already
 * exists.  Holds boost_mutex across creation so boost timing state is
 * not recalculated concurrently.  Returns 0 on success or the
 * kthread-creation error code.
 */
static int rcutorture_booster_init(int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;	/* Don't leave an ERR_PTR behind. */
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}
1072 | ||
/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		pr_alert("rcu_torture_stall start.\n");
		rcu_read_lock();
		preempt_disable();	/* Spin non-preemptibly inside the reader. */
		while (ULONG_CMP_LT(get_seconds(), stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	/* The stall is one-shot; idle here until told to stop. */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}
1104 | ||
1105 | /* Spawn CPU-stall kthread, if stall_cpu specified. */ | |
1106 | static int __init rcu_torture_stall_init(void) | |
1107 | { | |
c13f3757 PM |
1108 | if (stall_cpu <= 0) |
1109 | return 0; | |
47cf29b9 | 1110 | return torture_create_kthread(rcu_torture_stall, NULL, stall_task); |
c13f3757 PM |
1111 | } |
1112 | ||
1113 | /* Clean up after the CPU-stall kthread, if one was spawned. */ | |
1114 | static void rcu_torture_stall_cleanup(void) | |
1115 | { | |
1116 | if (stall_task == NULL) | |
1117 | return; | |
5ccf60f2 | 1118 | VERBOSE_TOROUT_STRING("Stopping rcu_torture_stall_task."); |
c13f3757 | 1119 | kthread_stop(stall_task); |
37e377d2 | 1120 | stall_task = NULL; |
c13f3757 PM |
1121 | } |
1122 | ||
fae4b54f PM |
/* Callback function for RCU barrier testing. */
void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	/* Count each invocation; rcu_torture_barrier() compares the total
	 * against n_barrier_cbs after cb_barrier() returns. */
	atomic_inc(&barrier_cbs_invoked);
}
1128 | ||
/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;	/* Index into barrier_cbs_wq[]. */
	bool lastphase = 0;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, 19);
	do {
		/* Sleep until rcu_torture_barrier() flips barrier_phase. */
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    ACCESS_ONCE(barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		smp_mb(); /* ensure barrier_phase load before ->call(). */
		if (torture_must_stop())
			break;
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		/* The last poster wakes the coordinating kthread. */
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	/* Wait out our on-stack rcu_head before it goes out of scope. */
	cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
1158 | ||
1159 | /* kthread function to drive and coordinate RCU barrier testing. */ | |
1160 | static int rcu_torture_barrier(void *arg) | |
1161 | { | |
1162 | int i; | |
1163 | ||
5ccf60f2 | 1164 | VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); |
fae4b54f PM |
1165 | do { |
1166 | atomic_set(&barrier_cbs_invoked, 0); | |
1167 | atomic_set(&barrier_cbs_count, n_barrier_cbs); | |
c6ebcbb6 PM |
1168 | smp_mb(); /* Ensure barrier_phase after prior assignments. */ |
1169 | barrier_phase = !barrier_phase; | |
fae4b54f PM |
1170 | for (i = 0; i < n_barrier_cbs; i++) |
1171 | wake_up(&barrier_cbs_wq[i]); | |
1172 | wait_event(barrier_wq, | |
1173 | atomic_read(&barrier_cbs_count) == 0 || | |
36970bb9 PM |
1174 | torture_must_stop()); |
1175 | if (torture_must_stop()) | |
fae4b54f PM |
1176 | break; |
1177 | n_barrier_attempts++; | |
78e4bc34 | 1178 | cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ |
fae4b54f PM |
1179 | if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { |
1180 | n_rcu_torture_barrier_error++; | |
1181 | WARN_ON_ONCE(1); | |
1182 | } | |
1183 | n_barrier_successes++; | |
1184 | schedule_timeout_interruptible(HZ / 10); | |
36970bb9 | 1185 | } while (!torture_must_stop()); |
7fafaac5 | 1186 | torture_kthread_stopping("rcu_torture_barrier"); |
fae4b54f PM |
1187 | return 0; |
1188 | } | |
1189 | ||
1190 | /* Initialize RCU barrier testing. */ | |
1191 | static int rcu_torture_barrier_init(void) | |
1192 | { | |
1193 | int i; | |
1194 | int ret; | |
1195 | ||
1196 | if (n_barrier_cbs == 0) | |
1197 | return 0; | |
1198 | if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { | |
2caa1e44 PM |
1199 | pr_alert("%s" TORTURE_FLAG |
1200 | " Call or barrier ops missing for %s,\n", | |
1201 | torture_type, cur_ops->name); | |
1202 | pr_alert("%s" TORTURE_FLAG | |
1203 | " RCU barrier testing omitted from run.\n", | |
1204 | torture_type); | |
fae4b54f PM |
1205 | return 0; |
1206 | } | |
1207 | atomic_set(&barrier_cbs_count, 0); | |
1208 | atomic_set(&barrier_cbs_invoked, 0); | |
1209 | barrier_cbs_tasks = | |
1210 | kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]), | |
1211 | GFP_KERNEL); | |
1212 | barrier_cbs_wq = | |
1213 | kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]), | |
1214 | GFP_KERNEL); | |
de5e6437 | 1215 | if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) |
fae4b54f PM |
1216 | return -ENOMEM; |
1217 | for (i = 0; i < n_barrier_cbs; i++) { | |
1218 | init_waitqueue_head(&barrier_cbs_wq[i]); | |
47cf29b9 PM |
1219 | ret = torture_create_kthread(rcu_torture_barrier_cbs, |
1220 | (void *)(long)i, | |
1221 | barrier_cbs_tasks[i]); | |
1222 | if (ret) | |
fae4b54f | 1223 | return ret; |
fae4b54f | 1224 | } |
47cf29b9 | 1225 | return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); |
fae4b54f PM |
1226 | } |
1227 | ||
1228 | /* Clean up after RCU barrier testing. */ | |
1229 | static void rcu_torture_barrier_cleanup(void) | |
1230 | { | |
1231 | int i; | |
1232 | ||
1233 | if (barrier_task != NULL) { | |
5ccf60f2 | 1234 | VERBOSE_TOROUT_STRING("Stopping rcu_torture_barrier task"); |
fae4b54f PM |
1235 | kthread_stop(barrier_task); |
1236 | barrier_task = NULL; | |
1237 | } | |
1238 | if (barrier_cbs_tasks != NULL) { | |
1239 | for (i = 0; i < n_barrier_cbs; i++) { | |
1240 | if (barrier_cbs_tasks[i] != NULL) { | |
5ccf60f2 | 1241 | VERBOSE_TOROUT_STRING("Stopping rcu_torture_barrier_cbs task"); |
fae4b54f PM |
1242 | kthread_stop(barrier_cbs_tasks[i]); |
1243 | barrier_cbs_tasks[i] = NULL; | |
1244 | } | |
1245 | } | |
1246 | kfree(barrier_cbs_tasks); | |
1247 | barrier_cbs_tasks = NULL; | |
1248 | } | |
1249 | if (barrier_cbs_wq != NULL) { | |
1250 | kfree(barrier_cbs_wq); | |
1251 | barrier_cbs_wq = NULL; | |
1252 | } | |
1253 | } | |
1254 | ||
8e8be45e PM |
/*
 * CPU-hotplug notifier: keep a boost kthread on each online CPU.  A CPU
 * coming online (or failing to go offline) gets a booster created; a CPU
 * about to go offline has its booster stopped first.
 */
static int rcutorture_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		(void)rcutorture_booster_init(cpu);	/* Errors counted inside. */
		break;
	case CPU_DOWN_PREPARE:
		rcutorture_booster_cleanup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
1273 | ||
/* Hotplug notifier block, registered only when boost testing is enabled. */
static struct notifier_block rcutorture_cpu_nb = {
	.notifier_call = rcutorture_cpu_notify,
};
1277 | ||
a241ec65 PM |
/*
 * Tear down the torture test: stop every kthread this module started,
 * wait for outstanding RCU callbacks, print final statistics, and
 * announce the verdict.  Also used as the shutdown handler registered
 * with torture_shutdown_init() in rcu_torture_init().
 */
static void
rcu_torture_cleanup(void)
{
	int i;

	rcutorture_record_test_transition();
	if (torture_cleanup()) {
		/* Cleanup already in flight elsewhere; just drain callbacks. */
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}

	rcu_torture_barrier_cleanup();
	rcu_torture_stall_cleanup();
	torture_stutter_cleanup();

	if (writer_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_TOROUT_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;	/* No more readers to see it. */

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_TOROUT_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		/* Same condition under which the notifier was registered. */
		unregister_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i)
			rcutorture_booster_cleanup(i);
	}
	torture_shutdown_cleanup();

	/* Wait for all RCU callbacks to fire. */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}
1361 | ||
d2818df1 PM |
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
/* Intentionally empty: the first of the duplicate callback pair. */
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

/* Runs only if the duplicate call_rcu() slipped past debug-objects. */
static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("rcutorture: duplicated callback was invoked.\n");
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1381 | ||
/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
1417 | ||
6f8bc500 | 1418 | static int __init |
a241ec65 PM |
1419 | rcu_torture_init(void) |
1420 | { | |
1421 | int i; | |
1422 | int cpu; | |
1423 | int firsterr = 0; | |
2ec1f2d9 PM |
1424 | static struct rcu_torture_ops *torture_ops[] = { |
1425 | &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops, | |
1426 | }; | |
a241ec65 | 1427 | |
628edaa5 | 1428 | torture_init_begin(torture_type, verbose, &rcutorture_runnable); |
343e9099 | 1429 | |
a241ec65 | 1430 | /* Process args and tell the world that the torturer is on the job. */ |
ade5fb81 | 1431 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { |
72e9bb54 | 1432 | cur_ops = torture_ops[i]; |
ade5fb81 | 1433 | if (strcmp(torture_type, cur_ops->name) == 0) |
72e9bb54 | 1434 | break; |
72e9bb54 | 1435 | } |
ade5fb81 | 1436 | if (i == ARRAY_SIZE(torture_ops)) { |
2caa1e44 PM |
1437 | pr_alert("rcu-torture: invalid torture type: \"%s\"\n", |
1438 | torture_type); | |
1439 | pr_alert("rcu-torture types:"); | |
cf886c44 | 1440 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) |
2caa1e44 PM |
1441 | pr_alert(" %s", torture_ops[i]->name); |
1442 | pr_alert("\n"); | |
b5daa8f3 | 1443 | torture_init_end(); |
a71fca58 | 1444 | return -EINVAL; |
72e9bb54 | 1445 | } |
bf66f18e | 1446 | if (cur_ops->fqs == NULL && fqs_duration != 0) { |
2caa1e44 | 1447 | pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); |
bf66f18e PM |
1448 | fqs_duration = 0; |
1449 | } | |
c8e5b163 | 1450 | if (cur_ops->init) |
72e9bb54 PM |
1451 | cur_ops->init(); /* no "goto unwind" prior to this point!!! */ |
1452 | ||
a241ec65 PM |
1453 | if (nreaders >= 0) |
1454 | nrealreaders = nreaders; | |
1455 | else | |
1456 | nrealreaders = 2 * num_online_cpus(); | |
8e8be45e | 1457 | rcu_torture_print_module_parms(cur_ops, "Start of test"); |
a241ec65 PM |
1458 | |
1459 | /* Set up the freelist. */ | |
1460 | ||
1461 | INIT_LIST_HEAD(&rcu_torture_freelist); | |
788e770e | 1462 | for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { |
996417d2 | 1463 | rcu_tortures[i].rtort_mbtest = 0; |
a241ec65 PM |
1464 | list_add_tail(&rcu_tortures[i].rtort_free, |
1465 | &rcu_torture_freelist); | |
1466 | } | |
1467 | ||
1468 | /* Initialize the statistics so that each run gets its own numbers. */ | |
1469 | ||
1470 | rcu_torture_current = NULL; | |
1471 | rcu_torture_current_version = 0; | |
1472 | atomic_set(&n_rcu_torture_alloc, 0); | |
1473 | atomic_set(&n_rcu_torture_alloc_fail, 0); | |
1474 | atomic_set(&n_rcu_torture_free, 0); | |
996417d2 PM |
1475 | atomic_set(&n_rcu_torture_mberror, 0); |
1476 | atomic_set(&n_rcu_torture_error, 0); | |
fae4b54f | 1477 | n_rcu_torture_barrier_error = 0; |
8e8be45e PM |
1478 | n_rcu_torture_boost_ktrerror = 0; |
1479 | n_rcu_torture_boost_rterror = 0; | |
8e8be45e PM |
1480 | n_rcu_torture_boost_failure = 0; |
1481 | n_rcu_torture_boosts = 0; | |
a241ec65 PM |
1482 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
1483 | atomic_set(&rcu_torture_wcount[i], 0); | |
0a945022 | 1484 | for_each_possible_cpu(cpu) { |
a241ec65 PM |
1485 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
1486 | per_cpu(rcu_torture_count, cpu)[i] = 0; | |
1487 | per_cpu(rcu_torture_batch, cpu)[i] = 0; | |
1488 | } | |
1489 | } | |
1490 | ||
1491 | /* Start up the kthreads. */ | |
1492 | ||
47cf29b9 PM |
1493 | firsterr = torture_create_kthread(rcu_torture_writer, NULL, |
1494 | writer_task); | |
1495 | if (firsterr) | |
a241ec65 | 1496 | goto unwind; |
b772e1dd | 1497 | fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), |
a71fca58 | 1498 | GFP_KERNEL); |
b772e1dd | 1499 | if (fakewriter_tasks == NULL) { |
5ccf60f2 | 1500 | VERBOSE_TOROUT_ERRSTRING("out of memory"); |
b772e1dd JT |
1501 | firsterr = -ENOMEM; |
1502 | goto unwind; | |
1503 | } | |
1504 | for (i = 0; i < nfakewriters; i++) { | |
47cf29b9 PM |
1505 | firsterr = torture_create_kthread(rcu_torture_fakewriter, |
1506 | NULL, fakewriter_tasks[i]); | |
1507 | if (firsterr) | |
b772e1dd | 1508 | goto unwind; |
b772e1dd | 1509 | } |
2860aaba | 1510 | reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]), |
a241ec65 PM |
1511 | GFP_KERNEL); |
1512 | if (reader_tasks == NULL) { | |
5ccf60f2 | 1513 | VERBOSE_TOROUT_ERRSTRING("out of memory"); |
a241ec65 PM |
1514 | firsterr = -ENOMEM; |
1515 | goto unwind; | |
1516 | } | |
1517 | for (i = 0; i < nrealreaders; i++) { | |
47cf29b9 PM |
1518 | firsterr = torture_create_kthread(rcu_torture_reader, NULL, |
1519 | reader_tasks[i]); | |
1520 | if (firsterr) | |
a241ec65 | 1521 | goto unwind; |
a241ec65 PM |
1522 | } |
1523 | if (stat_interval > 0) { | |
47cf29b9 PM |
1524 | firsterr = torture_create_kthread(rcu_torture_stats, NULL, |
1525 | stats_task); | |
1526 | if (firsterr) | |
a241ec65 | 1527 | goto unwind; |
a241ec65 | 1528 | } |
d84f5203 | 1529 | if (test_no_idle_hz) { |
3808dc9f PM |
1530 | firsterr = torture_shuffle_init(shuffle_interval * HZ); |
1531 | if (firsterr) | |
73d0a4b1 | 1532 | goto unwind; |
d84f5203 | 1533 | } |
d120f65f PM |
1534 | if (stutter < 0) |
1535 | stutter = 0; | |
1536 | if (stutter) { | |
628edaa5 PM |
1537 | firsterr = torture_stutter_init(stutter * HZ); |
1538 | if (firsterr) | |
d120f65f | 1539 | goto unwind; |
d120f65f | 1540 | } |
bf66f18e PM |
1541 | if (fqs_duration < 0) |
1542 | fqs_duration = 0; | |
1543 | if (fqs_duration) { | |
628edaa5 | 1544 | /* Create the fqs thread */ |
47cf29b9 PM |
1545 | torture_create_kthread(rcu_torture_fqs, NULL, fqs_task); |
1546 | if (firsterr) | |
bf66f18e | 1547 | goto unwind; |
bf66f18e | 1548 | } |
8e8be45e PM |
1549 | if (test_boost_interval < 1) |
1550 | test_boost_interval = 1; | |
1551 | if (test_boost_duration < 2) | |
1552 | test_boost_duration = 2; | |
1553 | if ((test_boost == 1 && cur_ops->can_boost) || | |
1554 | test_boost == 2) { | |
8e8be45e PM |
1555 | |
1556 | boost_starttime = jiffies + test_boost_interval * HZ; | |
1557 | register_cpu_notifier(&rcutorture_cpu_nb); | |
1558 | for_each_possible_cpu(i) { | |
1559 | if (cpu_is_offline(i)) | |
1560 | continue; /* Heuristic: CPU can go offline. */ | |
01025ebc PM |
1561 | firsterr = rcutorture_booster_init(i); |
1562 | if (firsterr) | |
8e8be45e | 1563 | goto unwind; |
8e8be45e PM |
1564 | } |
1565 | } | |
01025ebc PM |
1566 | firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); |
1567 | if (firsterr) | |
e991dbc0 | 1568 | goto unwind; |
01025ebc PM |
1569 | firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ); |
1570 | if (firsterr) | |
37e377d2 | 1571 | goto unwind; |
01025ebc PM |
1572 | firsterr = rcu_torture_stall_init(); |
1573 | if (firsterr) | |
37e377d2 | 1574 | goto unwind; |
01025ebc PM |
1575 | firsterr = rcu_torture_barrier_init(); |
1576 | if (firsterr) | |
fae4b54f | 1577 | goto unwind; |
d2818df1 PM |
1578 | if (object_debug) |
1579 | rcu_test_debug_objects(); | |
4a298656 | 1580 | rcutorture_record_test_transition(); |
b5daa8f3 | 1581 | torture_init_end(); |
a241ec65 PM |
1582 | return 0; |
1583 | ||
1584 | unwind: | |
b5daa8f3 | 1585 | torture_init_end(); |
a241ec65 PM |
1586 | rcu_torture_cleanup(); |
1587 | return firsterr; | |
1588 | } | |
1589 | ||
/* Module entry/exit points: init spawns the test, exit tears it down. */
module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);