kernel/rcu/rcutorture.c
1 /*
2 * Read-Copy Update module-based torture test facility
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright (C) IBM Corporation, 2005, 2006
19 *
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
21 * Josh Triplett <josh@joshtriplett.org>
22 *
23 * See also: Documentation/RCU/torture.txt
24 */
25 #include <linux/types.h>
26 #include <linux/kernel.h>
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/kthread.h>
30 #include <linux/err.h>
31 #include <linux/spinlock.h>
32 #include <linux/smp.h>
33 #include <linux/rcupdate.h>
34 #include <linux/interrupt.h>
35 #include <linux/sched.h>
36 #include <linux/atomic.h>
37 #include <linux/bitops.h>
38 #include <linux/completion.h>
39 #include <linux/moduleparam.h>
40 #include <linux/percpu.h>
41 #include <linux/notifier.h>
42 #include <linux/reboot.h>
43 #include <linux/freezer.h>
44 #include <linux/cpu.h>
45 #include <linux/delay.h>
46 #include <linux/stat.h>
47 #include <linux/srcu.h>
48 #include <linux/slab.h>
49 #include <linux/trace_clock.h>
50 #include <asm/byteorder.h>
51 #include <linux/torture.h>
52 #include <linux/vmalloc.h>
53
54 MODULE_LICENSE("GPL");
55 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
56
57
58 torture_param(int, cbflood_inter_holdoff, HZ,
59 "Holdoff between floods (jiffies)");
60 torture_param(int, cbflood_intra_holdoff, 1,
61 "Holdoff between bursts (jiffies)");
62 torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
63 torture_param(int, cbflood_n_per_burst, 20000,
64 "# callbacks per burst in flood");
65 torture_param(int, fqs_duration, 0,
66 "Duration of fqs bursts (us), 0 to disable");
67 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
68 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
69 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
70 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
71 torture_param(bool, gp_normal, false,
72 "Use normal (non-expedited) GP wait primitives");
73 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
74 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
75 torture_param(int, n_barrier_cbs, 0,
76 "# of callbacks/kthreads for barrier testing");
77 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
78 torture_param(int, nreaders, -1, "Number of RCU reader threads");
79 torture_param(int, object_debug, 0,
80 "Enable debug-object double call_rcu() testing");
81 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
82 torture_param(int, onoff_interval, 0,
83 "Time between CPU hotplugs (s), 0=disable");
84 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
85 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
86 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
87 torture_param(int, stall_cpu_holdoff, 10,
88 "Time to wait before starting stall (s).");
89 torture_param(int, stat_interval, 60,
90 "Number of seconds between stats printk()s");
91 torture_param(int, stutter, 5, "Number of seconds to run/halt test");
92 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
93 torture_param(int, test_boost_duration, 4,
94 "Duration of each boost test, seconds.");
95 torture_param(int, test_boost_interval, 7,
96 "Interval between boost tests, seconds.");
97 torture_param(bool, test_no_idle_hz, true,
98 "Test support for tickless idle CPUs");
99 torture_param(bool, verbose, true,
100 "Enable verbose debugging printk()s");
101
102 static char *torture_type = "rcu";
103 module_param(torture_type, charp, 0444);
104 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
105
106 static int nrealreaders;
107 static int ncbflooders;
108 static struct task_struct *writer_task;
109 static struct task_struct **fakewriter_tasks;
110 static struct task_struct **reader_tasks;
111 static struct task_struct *stats_task;
112 static struct task_struct **cbflood_task;
113 static struct task_struct *fqs_task;
114 static struct task_struct *boost_tasks[NR_CPUS];
115 static struct task_struct *stall_task;
116 static struct task_struct **barrier_cbs_tasks;
117 static struct task_struct *barrier_task;
118
119 #define RCU_TORTURE_PIPE_LEN 10
120
121 struct rcu_torture {
122 struct rcu_head rtort_rcu;
123 int rtort_pipe_count;
124 struct list_head rtort_free;
125 int rtort_mbtest;
126 };
127
128 static LIST_HEAD(rcu_torture_freelist);
129 static struct rcu_torture __rcu *rcu_torture_current;
130 static unsigned long rcu_torture_current_version;
131 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
132 static DEFINE_SPINLOCK(rcu_torture_lock);
133 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
134 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
135 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
136 static atomic_t n_rcu_torture_alloc;
137 static atomic_t n_rcu_torture_alloc_fail;
138 static atomic_t n_rcu_torture_free;
139 static atomic_t n_rcu_torture_mberror;
140 static atomic_t n_rcu_torture_error;
141 static long n_rcu_torture_barrier_error;
142 static long n_rcu_torture_boost_ktrerror;
143 static long n_rcu_torture_boost_rterror;
144 static long n_rcu_torture_boost_failure;
145 static long n_rcu_torture_boosts;
146 static long n_rcu_torture_timers;
147 static long n_barrier_attempts;
148 static long n_barrier_successes;
149 static atomic_long_t n_cbfloods;
150 static struct list_head rcu_torture_removed;
151
152 static int rcu_torture_writer_state;
153 #define RTWS_FIXED_DELAY 0
154 #define RTWS_DELAY 1
155 #define RTWS_REPLACE 2
156 #define RTWS_DEF_FREE 3
157 #define RTWS_EXP_SYNC 4
158 #define RTWS_COND_GET 5
159 #define RTWS_COND_SYNC 6
160 #define RTWS_SYNC 7
161 #define RTWS_STUTTER 8
162 #define RTWS_STOPPING 9
163 static const char * const rcu_torture_writer_state_names[] = {
164 "RTWS_FIXED_DELAY",
165 "RTWS_DELAY",
166 "RTWS_REPLACE",
167 "RTWS_DEF_FREE",
168 "RTWS_EXP_SYNC",
169 "RTWS_COND_GET",
170 "RTWS_COND_SYNC",
171 "RTWS_SYNC",
172 "RTWS_STUTTER",
173 "RTWS_STOPPING",
174 };
175
176 static const char *rcu_torture_writer_state_getname(void)
177 {
178 unsigned int i = READ_ONCE(rcu_torture_writer_state);
179
180 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
181 return "???";
182 return rcu_torture_writer_state_names[i];
183 }
184
185 #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
186 #define RCUTORTURE_RUNNABLE_INIT 1
187 #else
188 #define RCUTORTURE_RUNNABLE_INIT 0
189 #endif
190 static int torture_runnable = RCUTORTURE_RUNNABLE_INIT;
191 module_param(torture_runnable, int, 0444);
192 MODULE_PARM_DESC(torture_runnable, "Start rcutorture at boot");
193
194 #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
195 #define rcu_can_boost() 1
196 #else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
197 #define rcu_can_boost() 0
198 #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
199
200 #ifdef CONFIG_RCU_TRACE
201 static u64 notrace rcu_trace_clock_local(void)
202 {
203 u64 ts = trace_clock_local();
204 unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
205 return ts;
206 }
207 #else /* #ifdef CONFIG_RCU_TRACE */
208 static u64 notrace rcu_trace_clock_local(void)
209 {
210 return 0ULL;
211 }
212 #endif /* #else #ifdef CONFIG_RCU_TRACE */
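/*
 * Note: the CONFIG_RCU_TRACE version above returns a microsecond
 * timestamp: do_div() divides the nanosecond trace_clock_local()
 * value by NSEC_PER_USEC in place, discarding the remainder into
 * the unused ts_rem.
 */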
213
214 static unsigned long boost_starttime; /* jiffies of next boost test start. */
215 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
216 /* and boost task create/destroy. */
217 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
218 static bool barrier_phase; /* Test phase. */
219 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
220 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
221 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
222
223 /*
224 * Allocate an element from the rcu_tortures pool.
225 */
226 static struct rcu_torture *
227 rcu_torture_alloc(void)
228 {
229 struct list_head *p;
230
231 spin_lock_bh(&rcu_torture_lock);
232 if (list_empty(&rcu_torture_freelist)) {
233 atomic_inc(&n_rcu_torture_alloc_fail);
234 spin_unlock_bh(&rcu_torture_lock);
235 return NULL;
236 }
237 atomic_inc(&n_rcu_torture_alloc);
238 p = rcu_torture_freelist.next;
239 list_del_init(p);
240 spin_unlock_bh(&rcu_torture_lock);
241 return container_of(p, struct rcu_torture, rtort_free);
242 }
243
244 /*
245 * Free an element to the rcu_tortures pool.
246 */
247 static void
248 rcu_torture_free(struct rcu_torture *p)
249 {
250 atomic_inc(&n_rcu_torture_free);
251 spin_lock_bh(&rcu_torture_lock);
252 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
253 spin_unlock_bh(&rcu_torture_lock);
254 }
255
256 /*
257 * Operations vector for selecting different types of tests.
258 */
259
260 struct rcu_torture_ops {
261 int ttype;
262 void (*init)(void);
263 void (*cleanup)(void);
264 int (*readlock)(void);
265 void (*read_delay)(struct torture_random_state *rrsp);
266 void (*readunlock)(int idx);
267 unsigned long (*started)(void);
268 unsigned long (*completed)(void);
269 void (*deferred_free)(struct rcu_torture *p);
270 void (*sync)(void);
271 void (*exp_sync)(void);
272 unsigned long (*get_state)(void);
273 void (*cond_sync)(unsigned long oldstate);
274 call_rcu_func_t call;
275 void (*cb_barrier)(void);
276 void (*fqs)(void);
277 void (*stats)(void);
278 int irq_capable;
279 int can_boost;
280 const char *name;
281 };
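/*
 * Illustrative sketch (comment only, not compiled): the reader-side
 * pattern that the test kthreads below apply through this ops vector,
 * so that a single body of test logic covers the rcu, rcu_bh, srcu,
 * sched, and tasks flavors alike:
 *
 *	idx = cur_ops->readlock();
 *	p = rcu_dereference_check(rcu_torture_current, ...);
 *	cur_ops->read_delay(&rand);
 *	cur_ops->readunlock(idx);
 */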
282
283 static struct rcu_torture_ops *cur_ops;
284
285 /*
286 * Definitions for rcu torture testing.
287 */
288
289 static int rcu_torture_read_lock(void) __acquires(RCU)
290 {
291 rcu_read_lock();
292 return 0;
293 }
294
295 static void rcu_read_delay(struct torture_random_state *rrsp)
296 {
297 const unsigned long shortdelay_us = 200;
298 const unsigned long longdelay_ms = 50;
299
300 /* We want a short delay sometimes to make a reader delay the grace
301 * period, and we want a long delay occasionally to trigger
302 * force_quiescent_state. */
303
304 if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
305 mdelay(longdelay_ms);
306 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
307 udelay(shortdelay_us);
308 #ifdef CONFIG_PREEMPT
309 if (!preempt_count() &&
310 !(torture_random(rrsp) % (nrealreaders * 20000)))
311 preempt_schedule(); /* No QS if preempt_disable() in effect */
312 #endif
313 }
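/*
 * Note on the probabilities above: each modulus scales with
 * nrealreaders, so the expected systemwide rate of long (50-ms) and
 * short (200-us) delays stays roughly constant as reader kthreads
 * are added -- each individual reader delays correspondingly less
 * often.
 */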
314
315 static void rcu_torture_read_unlock(int idx) __releases(RCU)
316 {
317 rcu_read_unlock();
318 }
319
320 /*
321 * Update callback in the pipe. This should be invoked after a grace period.
322 */
323 static bool
324 rcu_torture_pipe_update_one(struct rcu_torture *rp)
325 {
326 int i;
327
328 i = rp->rtort_pipe_count;
329 if (i > RCU_TORTURE_PIPE_LEN)
330 i = RCU_TORTURE_PIPE_LEN;
331 atomic_inc(&rcu_torture_wcount[i]);
332 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
333 rp->rtort_mbtest = 0;
334 return true;
335 }
336 return false;
337 }
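/*
 * Pipeline bookkeeping, for reference: each element's
 * rtort_pipe_count advances by one per grace period, and
 * rcu_torture_wcount[] histograms how far elements get before being
 * recycled.  An element becomes eligible for freeing only after
 * RCU_TORTURE_PIPE_LEN grace periods, at which point rtort_mbtest is
 * cleared, so any reader that still sees the element afterward will
 * increment n_rcu_torture_mberror.
 */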
338
339 /*
340 * Update all callbacks in the pipe. Suitable for synchronous grace-period
341 * primitives.
342 */
343 static void
344 rcu_torture_pipe_update(struct rcu_torture *old_rp)
345 {
346 struct rcu_torture *rp;
347 struct rcu_torture *rp1;
348
349 if (old_rp)
350 list_add(&old_rp->rtort_free, &rcu_torture_removed);
351 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
352 if (rcu_torture_pipe_update_one(rp)) {
353 list_del(&rp->rtort_free);
354 rcu_torture_free(rp);
355 }
356 }
357 }
358
359 static void
360 rcu_torture_cb(struct rcu_head *p)
361 {
362 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
363
364 if (torture_must_stop_irq()) {
365 /* Test is ending, just drop callbacks on the floor. */
366 /* The next initialization will pick up the pieces. */
367 return;
368 }
369 if (rcu_torture_pipe_update_one(rp))
370 rcu_torture_free(rp);
371 else
372 cur_ops->deferred_free(rp);
373 }
374
375 static unsigned long rcu_no_completed(void)
376 {
377 return 0;
378 }
379
380 static void rcu_torture_deferred_free(struct rcu_torture *p)
381 {
382 call_rcu(&p->rtort_rcu, rcu_torture_cb);
383 }
384
385 static void rcu_sync_torture_init(void)
386 {
387 INIT_LIST_HEAD(&rcu_torture_removed);
388 }
389
390 static struct rcu_torture_ops rcu_ops = {
391 .ttype = RCU_FLAVOR,
392 .init = rcu_sync_torture_init,
393 .readlock = rcu_torture_read_lock,
394 .read_delay = rcu_read_delay,
395 .readunlock = rcu_torture_read_unlock,
396 .started = rcu_batches_started,
397 .completed = rcu_batches_completed,
398 .deferred_free = rcu_torture_deferred_free,
399 .sync = synchronize_rcu,
400 .exp_sync = synchronize_rcu_expedited,
401 .get_state = get_state_synchronize_rcu,
402 .cond_sync = cond_synchronize_rcu,
403 .call = call_rcu,
404 .cb_barrier = rcu_barrier,
405 .fqs = rcu_force_quiescent_state,
406 .stats = NULL,
407 .irq_capable = 1,
408 .can_boost = rcu_can_boost(),
409 .name = "rcu"
410 };
411
412 /*
413 * Definitions for rcu_bh torture testing.
414 */
415
416 static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
417 {
418 rcu_read_lock_bh();
419 return 0;
420 }
421
422 static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
423 {
424 rcu_read_unlock_bh();
425 }
426
427 static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
428 {
429 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
430 }
431
432 static struct rcu_torture_ops rcu_bh_ops = {
433 .ttype = RCU_BH_FLAVOR,
434 .init = rcu_sync_torture_init,
435 .readlock = rcu_bh_torture_read_lock,
436 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
437 .readunlock = rcu_bh_torture_read_unlock,
438 .started = rcu_batches_started_bh,
439 .completed = rcu_batches_completed_bh,
440 .deferred_free = rcu_bh_torture_deferred_free,
441 .sync = synchronize_rcu_bh,
442 .exp_sync = synchronize_rcu_bh_expedited,
443 .call = call_rcu_bh,
444 .cb_barrier = rcu_barrier_bh,
445 .fqs = rcu_bh_force_quiescent_state,
446 .stats = NULL,
447 .irq_capable = 1,
448 .name = "rcu_bh"
449 };
450
451 /*
452 * Don't even think about trying any of these in real life!!!
453  * The names include "busted", and they really mean it!
454 * The only purpose of these functions is to provide a buggy RCU
455 * implementation to make sure that rcutorture correctly emits
456 * buggy-RCU error messages.
457 */
458 static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
459 {
460 /* This is a deliberate bug for testing purposes only! */
461 rcu_torture_cb(&p->rtort_rcu);
462 }
463
464 static void synchronize_rcu_busted(void)
465 {
466 /* This is a deliberate bug for testing purposes only! */
467 }
468
469 static void
470 call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
471 {
472 /* This is a deliberate bug for testing purposes only! */
473 func(head);
474 }
475
476 static struct rcu_torture_ops rcu_busted_ops = {
477 .ttype = INVALID_RCU_FLAVOR,
478 .init = rcu_sync_torture_init,
479 .readlock = rcu_torture_read_lock,
480 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
481 .readunlock = rcu_torture_read_unlock,
482 .started = rcu_no_completed,
483 .completed = rcu_no_completed,
484 .deferred_free = rcu_busted_torture_deferred_free,
485 .sync = synchronize_rcu_busted,
486 .exp_sync = synchronize_rcu_busted,
487 .call = call_rcu_busted,
488 .cb_barrier = NULL,
489 .fqs = NULL,
490 .stats = NULL,
491 .irq_capable = 1,
492 .name = "rcu_busted"
493 };
494
495 /*
496 * Definitions for srcu torture testing.
497 */
498
499 DEFINE_STATIC_SRCU(srcu_ctl);
500 static struct srcu_struct srcu_ctld;
501 static struct srcu_struct *srcu_ctlp = &srcu_ctl;
502
503 static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
504 {
505 return srcu_read_lock(srcu_ctlp);
506 }
507
508 static void srcu_read_delay(struct torture_random_state *rrsp)
509 {
510 long delay;
511 const long uspertick = 1000000 / HZ;
512 const long longdelay = 10;
513
514 /* We want there to be long-running readers, but not all the time. */
515
516 delay = torture_random(rrsp) %
517 (nrealreaders * 2 * longdelay * uspertick);
518 if (!delay)
519 schedule_timeout_interruptible(longdelay);
520 else
521 rcu_read_delay(rrsp);
522 }
523
524 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
525 {
526 srcu_read_unlock(srcu_ctlp, idx);
527 }
528
529 static unsigned long srcu_torture_completed(void)
530 {
531 return srcu_batches_completed(srcu_ctlp);
532 }
533
534 static void srcu_torture_deferred_free(struct rcu_torture *rp)
535 {
536 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
537 }
538
539 static void srcu_torture_synchronize(void)
540 {
541 synchronize_srcu(srcu_ctlp);
542 }
543
544 static void srcu_torture_call(struct rcu_head *head,
545 rcu_callback_t func)
546 {
547 call_srcu(srcu_ctlp, head, func);
548 }
549
550 static void srcu_torture_barrier(void)
551 {
552 srcu_barrier(srcu_ctlp);
553 }
554
555 static void srcu_torture_stats(void)
556 {
557 int cpu;
558 int idx = srcu_ctlp->completed & 0x1;
559
560 pr_alert("%s%s per-CPU(idx=%d):",
561 torture_type, TORTURE_FLAG, idx);
562 for_each_possible_cpu(cpu) {
563 long c0, c1;
564
565 c0 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[!idx];
566 c1 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[idx];
567 pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
568 }
569 pr_cont("\n");
570 }
571
572 static void srcu_torture_synchronize_expedited(void)
573 {
574 synchronize_srcu_expedited(srcu_ctlp);
575 }
576
577 static struct rcu_torture_ops srcu_ops = {
578 .ttype = SRCU_FLAVOR,
579 .init = rcu_sync_torture_init,
580 .readlock = srcu_torture_read_lock,
581 .read_delay = srcu_read_delay,
582 .readunlock = srcu_torture_read_unlock,
583 .started = NULL,
584 .completed = srcu_torture_completed,
585 .deferred_free = srcu_torture_deferred_free,
586 .sync = srcu_torture_synchronize,
587 .exp_sync = srcu_torture_synchronize_expedited,
588 .call = srcu_torture_call,
589 .cb_barrier = srcu_torture_barrier,
590 .stats = srcu_torture_stats,
591 .name = "srcu"
592 };
593
594 static void srcu_torture_init(void)
595 {
596 rcu_sync_torture_init();
597 WARN_ON(init_srcu_struct(&srcu_ctld));
598 srcu_ctlp = &srcu_ctld;
599 }
600
601 static void srcu_torture_cleanup(void)
602 {
603 cleanup_srcu_struct(&srcu_ctld);
604 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
605 }
606
607 /* As above, but dynamically allocated. */
608 static struct rcu_torture_ops srcud_ops = {
609 .ttype = SRCU_FLAVOR,
610 .init = srcu_torture_init,
611 .cleanup = srcu_torture_cleanup,
612 .readlock = srcu_torture_read_lock,
613 .read_delay = srcu_read_delay,
614 .readunlock = srcu_torture_read_unlock,
615 .started = NULL,
616 .completed = srcu_torture_completed,
617 .deferred_free = srcu_torture_deferred_free,
618 .sync = srcu_torture_synchronize,
619 .exp_sync = srcu_torture_synchronize_expedited,
620 .call = srcu_torture_call,
621 .cb_barrier = srcu_torture_barrier,
622 .stats = srcu_torture_stats,
623 .name = "srcud"
624 };
625
626 /*
627 * Definitions for sched torture testing.
628 */
629
630 static int sched_torture_read_lock(void)
631 {
632 preempt_disable();
633 return 0;
634 }
635
636 static void sched_torture_read_unlock(int idx)
637 {
638 preempt_enable();
639 }
640
641 static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
642 {
643 call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
644 }
645
646 static struct rcu_torture_ops sched_ops = {
647 .ttype = RCU_SCHED_FLAVOR,
648 .init = rcu_sync_torture_init,
649 .readlock = sched_torture_read_lock,
650 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
651 .readunlock = sched_torture_read_unlock,
652 .started = rcu_batches_started_sched,
653 .completed = rcu_batches_completed_sched,
654 .deferred_free = rcu_sched_torture_deferred_free,
655 .sync = synchronize_sched,
656 .exp_sync = synchronize_sched_expedited,
657 .get_state = get_state_synchronize_sched,
658 .cond_sync = cond_synchronize_sched,
659 .call = call_rcu_sched,
660 .cb_barrier = rcu_barrier_sched,
661 .fqs = rcu_sched_force_quiescent_state,
662 .stats = NULL,
663 .irq_capable = 1,
664 .name = "sched"
665 };
666
667 #ifdef CONFIG_TASKS_RCU
668
669 /*
670 * Definitions for RCU-tasks torture testing.
671 */
672
673 static int tasks_torture_read_lock(void)
674 {
675 return 0;
676 }
677
678 static void tasks_torture_read_unlock(int idx)
679 {
680 }
681
682 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
683 {
684 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
685 }
686
687 static struct rcu_torture_ops tasks_ops = {
688 .ttype = RCU_TASKS_FLAVOR,
689 .init = rcu_sync_torture_init,
690 .readlock = tasks_torture_read_lock,
691 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
692 .readunlock = tasks_torture_read_unlock,
693 .started = rcu_no_completed,
694 .completed = rcu_no_completed,
695 .deferred_free = rcu_tasks_torture_deferred_free,
696 .sync = synchronize_rcu_tasks,
697 .exp_sync = synchronize_rcu_tasks,
698 .call = call_rcu_tasks,
699 .cb_barrier = rcu_barrier_tasks,
700 .fqs = NULL,
701 .stats = NULL,
702 .irq_capable = 1,
703 .name = "tasks"
704 };
705
706 #define RCUTORTURE_TASKS_OPS &tasks_ops,
707
708 static bool __maybe_unused torturing_tasks(void)
709 {
710 return cur_ops == &tasks_ops;
711 }
712
713 #else /* #ifdef CONFIG_TASKS_RCU */
714
715 #define RCUTORTURE_TASKS_OPS
716
717 static bool __maybe_unused torturing_tasks(void)
718 {
719 return false;
720 }
721
722 #endif /* #else #ifdef CONFIG_TASKS_RCU */
723
724 /*
725 * RCU torture priority-boost testing. Runs one real-time thread per
726 * CPU for moderate bursts, repeatedly registering RCU callbacks and
727 * spinning waiting for them to be invoked. If a given callback takes
728 * too long to be invoked, we assume that priority inversion has occurred.
729 */
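/*
 * Failure criterion used below: a callback posted at call_rcu_time
 * must be invoked within test_boost_duration * HZ - HZ / 2 jiffies
 * (the boost interval minus half a second of slack).  If it is not,
 * the RT-priority spinner is presumed to have starved RCU's
 * callbacks, and n_rcu_torture_boost_failure is incremented.
 */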
730
731 struct rcu_boost_inflight {
732 struct rcu_head rcu;
733 int inflight;
734 };
735
736 static void rcu_torture_boost_cb(struct rcu_head *head)
737 {
738 struct rcu_boost_inflight *rbip =
739 container_of(head, struct rcu_boost_inflight, rcu);
740
741 /* Ensure RCU-core accesses precede clearing ->inflight */
742 smp_store_release(&rbip->inflight, 0);
743 }
744
745 static int rcu_torture_boost(void *arg)
746 {
747 unsigned long call_rcu_time;
748 unsigned long endtime;
749 unsigned long oldstarttime;
750 struct rcu_boost_inflight rbi = { .inflight = 0 };
751 struct sched_param sp;
752
753 VERBOSE_TOROUT_STRING("rcu_torture_boost started");
754
755 /* Set real-time priority. */
756 sp.sched_priority = 1;
757 if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
758 VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
759 n_rcu_torture_boost_rterror++;
760 }
761
762 init_rcu_head_on_stack(&rbi.rcu);
763 /* Each pass through the following loop does one boost-test cycle. */
764 do {
765 /* Wait for the next test interval. */
766 oldstarttime = boost_starttime;
767 while (ULONG_CMP_LT(jiffies, oldstarttime)) {
768 schedule_timeout_interruptible(oldstarttime - jiffies);
769 stutter_wait("rcu_torture_boost");
770 if (torture_must_stop())
771 goto checkwait;
772 }
773
774 /* Do one boost-test interval. */
775 endtime = oldstarttime + test_boost_duration * HZ;
776 call_rcu_time = jiffies;
777 while (ULONG_CMP_LT(jiffies, endtime)) {
778 /* If we don't have a callback in flight, post one. */
779 if (!smp_load_acquire(&rbi.inflight)) {
780 /* RCU core before ->inflight = 1. */
781 smp_store_release(&rbi.inflight, 1);
782 call_rcu(&rbi.rcu, rcu_torture_boost_cb);
783 if (jiffies - call_rcu_time >
784 test_boost_duration * HZ - HZ / 2) {
785 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
786 n_rcu_torture_boost_failure++;
787 }
788 call_rcu_time = jiffies;
789 }
790 stutter_wait("rcu_torture_boost");
791 if (torture_must_stop())
792 goto checkwait;
793 }
794
795 /*
796 * Set the start time of the next test interval.
797 * Yes, this is vulnerable to long delays, but such
798 * delays simply cause a false negative for the next
799 * interval. Besides, we are running at RT priority,
800 * so delays should be relatively rare.
801 */
802 while (oldstarttime == boost_starttime &&
803 !kthread_should_stop()) {
804 if (mutex_trylock(&boost_mutex)) {
805 boost_starttime = jiffies +
806 test_boost_interval * HZ;
807 n_rcu_torture_boosts++;
808 mutex_unlock(&boost_mutex);
809 break;
810 }
811 schedule_timeout_uninterruptible(1);
812 }
813
814 /* Go do the stutter. */
815 checkwait: stutter_wait("rcu_torture_boost");
816 } while (!torture_must_stop());
817
818 /* Clean up and exit. */
819 while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
820 torture_shutdown_absorb("rcu_torture_boost");
821 schedule_timeout_uninterruptible(1);
822 }
823 destroy_rcu_head_on_stack(&rbi.rcu);
824 torture_kthread_stopping("rcu_torture_boost");
825 return 0;
826 }
827
828 static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
829 {
830 }
831
832 /*
833 * RCU torture callback-flood kthread. Repeatedly induces bursts of calls
834  * to call_rcu() or an analogous primitive, increasing the probability
835 * of callback-overflow corner cases.
836 */
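/*
 * Sizing note: with the default parameters this allocates
 * cbflood_n_burst * cbflood_n_per_burst = 3 * 20000 = 60,000
 * rcu_head structures up front, posts all of them each flood with
 * cbflood_intra_holdoff jiffies between bursts, then waits for them
 * all via cur_ops->cb_barrier() before the next flood.
 */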
837 static int
838 rcu_torture_cbflood(void *arg)
839 {
840 int err = 1;
841 int i;
842 int j;
843 struct rcu_head *rhp;
844
845 if (cbflood_n_per_burst > 0 &&
846 cbflood_inter_holdoff > 0 &&
847 cbflood_intra_holdoff > 0 &&
848 cur_ops->call &&
849 cur_ops->cb_barrier) {
850 rhp = vmalloc(sizeof(*rhp) *
851 cbflood_n_burst * cbflood_n_per_burst);
852 err = !rhp;
853 }
854 if (err) {
855 VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
856 goto wait_for_stop;
857 }
858 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
859 do {
860 schedule_timeout_interruptible(cbflood_inter_holdoff);
861 atomic_long_inc(&n_cbfloods);
862 WARN_ON(signal_pending(current));
863 for (i = 0; i < cbflood_n_burst; i++) {
864 for (j = 0; j < cbflood_n_per_burst; j++) {
865 cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
866 rcu_torture_cbflood_cb);
867 }
868 schedule_timeout_interruptible(cbflood_intra_holdoff);
869 WARN_ON(signal_pending(current));
870 }
871 cur_ops->cb_barrier();
872 stutter_wait("rcu_torture_cbflood");
873 } while (!torture_must_stop());
874 vfree(rhp);
875 wait_for_stop:
876 torture_kthread_stopping("rcu_torture_cbflood");
877 return 0;
878 }
879
880 /*
881 * RCU torture force-quiescent-state kthread. Repeatedly induces
882 * bursts of calls to force_quiescent_state(), increasing the probability
883 * of occurrence of some important types of race conditions.
884 */
885 static int
886 rcu_torture_fqs(void *arg)
887 {
888 unsigned long fqs_resume_time;
889 int fqs_burst_remaining;
890
891 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
892 do {
893 fqs_resume_time = jiffies + fqs_stutter * HZ;
894 while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
895 !kthread_should_stop()) {
896 schedule_timeout_interruptible(1);
897 }
898 fqs_burst_remaining = fqs_duration;
899 while (fqs_burst_remaining > 0 &&
900 !kthread_should_stop()) {
901 cur_ops->fqs();
902 udelay(fqs_holdoff);
903 fqs_burst_remaining -= fqs_holdoff;
904 }
905 stutter_wait("rcu_torture_fqs");
906 } while (!torture_must_stop());
907 torture_kthread_stopping("rcu_torture_fqs");
908 return 0;
909 }
910
911 /*
912 * RCU torture writer kthread. Repeatedly substitutes a new structure
913 * for that pointed to by rcu_torture_current, freeing the old structure
914 * after a series of grace periods (the "pipeline").
915 */
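/*
 * For reference, the conditional grace-period pattern exercised by
 * the RTWS_COND_GET/RTWS_COND_SYNC states below (sketch only,
 * assuming the "rcu" flavor):
 *
 *	gp_snap = get_state_synchronize_rcu();
 *	... delay, allowing a grace period to elapse ...
 *	cond_synchronize_rcu(gp_snap);
 *
 * cond_sync() returns immediately if a full grace period has already
 * elapsed since the get_state() snapshot, and otherwise blocks.
 */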
916 static int
917 rcu_torture_writer(void *arg)
918 {
919 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
920 int expediting = 0;
921 unsigned long gp_snap;
922 bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
923 bool gp_sync1 = gp_sync;
924 int i;
925 struct rcu_torture *rp;
926 struct rcu_torture *old_rp;
927 static DEFINE_TORTURE_RANDOM(rand);
928 int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
929 RTWS_COND_GET, RTWS_SYNC };
930 int nsynctypes = 0;
931
932 VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
933 if (!can_expedite) {
934 pr_alert("%s" TORTURE_FLAG
935 " GP expediting controlled from boot/sysfs for %s,\n",
936 torture_type, cur_ops->name);
937 pr_alert("%s" TORTURE_FLAG
938 " Disabled dynamic grace-period expediting.\n",
939 torture_type);
940 }
941
942 /* Initialize synctype[] array. If none set, take default. */
943 if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
944 gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
945 if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync)
946 synctype[nsynctypes++] = RTWS_COND_GET;
947 else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync))
948 pr_alert("rcu_torture_writer: gp_cond without primitives.\n");
949 if (gp_exp1 && cur_ops->exp_sync)
950 synctype[nsynctypes++] = RTWS_EXP_SYNC;
951 else if (gp_exp && !cur_ops->exp_sync)
952 pr_alert("rcu_torture_writer: gp_exp without primitives.\n");
953 if (gp_normal1 && cur_ops->deferred_free)
954 synctype[nsynctypes++] = RTWS_DEF_FREE;
955 else if (gp_normal && !cur_ops->deferred_free)
956 pr_alert("rcu_torture_writer: gp_normal without primitives.\n");
957 if (gp_sync1 && cur_ops->sync)
958 synctype[nsynctypes++] = RTWS_SYNC;
959 else if (gp_sync && !cur_ops->sync)
960 pr_alert("rcu_torture_writer: gp_sync without primitives.\n");
961 if (WARN_ONCE(nsynctypes == 0,
962 "rcu_torture_writer: No update-side primitives.\n")) {
963 /*
964  * No update-side primitives, so don't try updating.
965 * The resulting test won't be testing much, hence the
966 * above WARN_ONCE().
967 */
968 rcu_torture_writer_state = RTWS_STOPPING;
969 torture_kthread_stopping("rcu_torture_writer");
970 }
971
972 do {
973 rcu_torture_writer_state = RTWS_FIXED_DELAY;
974 schedule_timeout_uninterruptible(1);
975 rp = rcu_torture_alloc();
976 if (rp == NULL)
977 continue;
978 rp->rtort_pipe_count = 0;
979 rcu_torture_writer_state = RTWS_DELAY;
980 udelay(torture_random(&rand) & 0x3ff);
981 rcu_torture_writer_state = RTWS_REPLACE;
982 old_rp = rcu_dereference_check(rcu_torture_current,
983 current == writer_task);
984 rp->rtort_mbtest = 1;
985 rcu_assign_pointer(rcu_torture_current, rp);
986 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
987 if (old_rp) {
988 i = old_rp->rtort_pipe_count;
989 if (i > RCU_TORTURE_PIPE_LEN)
990 i = RCU_TORTURE_PIPE_LEN;
991 atomic_inc(&rcu_torture_wcount[i]);
992 old_rp->rtort_pipe_count++;
993 switch (synctype[torture_random(&rand) % nsynctypes]) {
994 case RTWS_DEF_FREE:
995 rcu_torture_writer_state = RTWS_DEF_FREE;
996 cur_ops->deferred_free(old_rp);
997 break;
998 case RTWS_EXP_SYNC:
999 rcu_torture_writer_state = RTWS_EXP_SYNC;
1000 cur_ops->exp_sync();
1001 rcu_torture_pipe_update(old_rp);
1002 break;
1003 case RTWS_COND_GET:
1004 rcu_torture_writer_state = RTWS_COND_GET;
1005 gp_snap = cur_ops->get_state();
1006 i = torture_random(&rand) % 16;
1007 if (i != 0)
1008 schedule_timeout_interruptible(i);
1009 udelay(torture_random(&rand) % 1000);
1010 rcu_torture_writer_state = RTWS_COND_SYNC;
1011 cur_ops->cond_sync(gp_snap);
1012 rcu_torture_pipe_update(old_rp);
1013 break;
1014 case RTWS_SYNC:
1015 rcu_torture_writer_state = RTWS_SYNC;
1016 cur_ops->sync();
1017 rcu_torture_pipe_update(old_rp);
1018 break;
1019 default:
1020 WARN_ON_ONCE(1);
1021 break;
1022 }
1023 }
1024 rcutorture_record_progress(++rcu_torture_current_version);
1025 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
1026 if (can_expedite &&
1027 !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1028 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1029 if (expediting >= 0)
1030 rcu_expedite_gp();
1031 else
1032 rcu_unexpedite_gp();
1033 if (++expediting > 3)
1034 expediting = -expediting;
1035 }
1036 rcu_torture_writer_state = RTWS_STUTTER;
1037 stutter_wait("rcu_torture_writer");
1038 } while (!torture_must_stop());
1039 /* Reset expediting back to unexpedited. */
1040 if (expediting > 0)
1041 expediting = -expediting;
1042 while (can_expedite && expediting++ < 0)
1043 rcu_unexpedite_gp();
1044 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1045 rcu_torture_writer_state = RTWS_STOPPING;
1046 torture_kthread_stopping("rcu_torture_writer");
1047 return 0;
1048 }
1049
1050 /*
1051 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
1052 * delay between calls.
1053 */
1054 static int
1055 rcu_torture_fakewriter(void *arg)
1056 {
1057 DEFINE_TORTURE_RANDOM(rand);
1058
1059 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1060 set_user_nice(current, MAX_NICE);
1061
1062 do {
1063 schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
1064 udelay(torture_random(&rand) & 0x3ff);
1065 if (cur_ops->cb_barrier != NULL &&
1066 torture_random(&rand) % (nfakewriters * 8) == 0) {
1067 cur_ops->cb_barrier();
1068 } else if (gp_normal == gp_exp) {
1069 if (torture_random(&rand) & 0x80)
1070 cur_ops->sync();
1071 else
1072 cur_ops->exp_sync();
1073 } else if (gp_normal) {
1074 cur_ops->sync();
1075 } else {
1076 cur_ops->exp_sync();
1077 }
1078 stutter_wait("rcu_torture_fakewriter");
1079 } while (!torture_must_stop());
1080
1081 torture_kthread_stopping("rcu_torture_fakewriter");
1082 return 0;
1083 }
1084
1085 /*
1086 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
1087 * incrementing the corresponding element of the pipeline array. The
1088  * counter in the element should never be greater than 1; otherwise, the
1089 * RCU implementation is broken.
1090 */
1091 static void rcu_torture_timer(unsigned long unused)
1092 {
1093 int idx;
1094 unsigned long started;
1095 unsigned long completed;
1096 static DEFINE_TORTURE_RANDOM(rand);
1097 static DEFINE_SPINLOCK(rand_lock);
1098 struct rcu_torture *p;
1099 int pipe_count;
1100 unsigned long long ts;
1101
1102 idx = cur_ops->readlock();
1103 if (cur_ops->started)
1104 started = cur_ops->started();
1105 else
1106 started = cur_ops->completed();
1107 ts = rcu_trace_clock_local();
1108 p = rcu_dereference_check(rcu_torture_current,
1109 rcu_read_lock_bh_held() ||
1110 rcu_read_lock_sched_held() ||
1111 srcu_read_lock_held(srcu_ctlp) ||
1112 torturing_tasks());
1113 if (p == NULL) {
1114 /* Leave because rcu_torture_writer is not yet underway */
1115 cur_ops->readunlock(idx);
1116 return;
1117 }
1118 if (p->rtort_mbtest == 0)
1119 atomic_inc(&n_rcu_torture_mberror);
1120 spin_lock(&rand_lock);
1121 cur_ops->read_delay(&rand);
1122 n_rcu_torture_timers++;
1123 spin_unlock(&rand_lock);
1124 preempt_disable();
1125 pipe_count = p->rtort_pipe_count;
1126 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1127 /* Should not happen, but... */
1128 pipe_count = RCU_TORTURE_PIPE_LEN;
1129 }
1130 completed = cur_ops->completed();
1131 if (pipe_count > 1) {
1132 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
1133 started, completed);
1134 rcu_ftrace_dump(DUMP_ALL);
1135 }
1136 __this_cpu_inc(rcu_torture_count[pipe_count]);
1137 completed = completed - started;
1138 if (cur_ops->started)
1139 completed++;
1140 if (completed > RCU_TORTURE_PIPE_LEN) {
1141 /* Should not happen, but... */
1142 completed = RCU_TORTURE_PIPE_LEN;
1143 }
1144 __this_cpu_inc(rcu_torture_batch[completed]);
1145 preempt_enable();
1146 cur_ops->readunlock(idx);
1147 }
1148
1149 /*
1150 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
1151 * incrementing the corresponding element of the pipeline array. The
1152  * counter in the element should never be greater than 1; otherwise, the
1153 * RCU implementation is broken.
1154 */
1155 static int
1156 rcu_torture_reader(void *arg)
1157 {
1158 unsigned long started;
1159 unsigned long completed;
1160 int idx;
1161 DEFINE_TORTURE_RANDOM(rand);
1162 struct rcu_torture *p;
1163 int pipe_count;
1164 struct timer_list t;
1165 unsigned long long ts;
1166
1167 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1168 set_user_nice(current, MAX_NICE);
1169 if (irqreader && cur_ops->irq_capable)
1170 setup_timer_on_stack(&t, rcu_torture_timer, 0);
1171
1172 do {
1173 if (irqreader && cur_ops->irq_capable) {
1174 if (!timer_pending(&t))
1175 mod_timer(&t, jiffies + 1);
1176 }
1177 idx = cur_ops->readlock();
1178 if (cur_ops->started)
1179 started = cur_ops->started();
1180 else
1181 started = cur_ops->completed();
1182 ts = rcu_trace_clock_local();
1183 p = rcu_dereference_check(rcu_torture_current,
1184 rcu_read_lock_bh_held() ||
1185 rcu_read_lock_sched_held() ||
1186 srcu_read_lock_held(srcu_ctlp) ||
1187 torturing_tasks());
1188 if (p == NULL) {
1189 /* Wait for rcu_torture_writer to get underway */
1190 cur_ops->readunlock(idx);
1191 schedule_timeout_interruptible(HZ);
1192 continue;
1193 }
1194 if (p->rtort_mbtest == 0)
1195 atomic_inc(&n_rcu_torture_mberror);
1196 cur_ops->read_delay(&rand);
1197 preempt_disable();
1198 pipe_count = p->rtort_pipe_count;
1199 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1200 /* Should not happen, but... */
1201 pipe_count = RCU_TORTURE_PIPE_LEN;
1202 }
1203 completed = cur_ops->completed();
1204 if (pipe_count > 1) {
1205 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1206 ts, started, completed);
1207 rcu_ftrace_dump(DUMP_ALL);
1208 }
1209 __this_cpu_inc(rcu_torture_count[pipe_count]);
1210 completed = completed - started;
1211 if (cur_ops->started)
1212 completed++;
1213 if (completed > RCU_TORTURE_PIPE_LEN) {
1214 /* Should not happen, but... */
1215 completed = RCU_TORTURE_PIPE_LEN;
1216 }
1217 __this_cpu_inc(rcu_torture_batch[completed]);
1218 preempt_enable();
1219 cur_ops->readunlock(idx);
1220 stutter_wait("rcu_torture_reader");
1221 } while (!torture_must_stop());
1222 if (irqreader && cur_ops->irq_capable) {
1223 del_timer_sync(&t);
1224 destroy_timer_on_stack(&t);
1225 }
1226 torture_kthread_stopping("rcu_torture_reader");
1227 return 0;
1228 }
1229
1230 /*
1231 * Print torture statistics. Caller must ensure that there is only
1232 * one call to this function at a given time!!! This is normally
1233 * accomplished by relying on the module system to only have one copy
1234 * of the module loaded, and then by giving the rcu_torture_stats
1235 * kthread full control (or the init/cleanup functions when rcu_torture_stats
1236 * thread is not running).
1237 */
1238 static void
1239 rcu_torture_stats_print(void)
1240 {
1241 int cpu;
1242 int i;
1243 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1244 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1245 static unsigned long rtcv_snap = ULONG_MAX;
1246
1247 for_each_possible_cpu(cpu) {
1248 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1249 pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
1250 batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
1251 }
1252 }
1253 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1254 if (pipesummary[i] != 0)
1255 break;
1256 }
1257
1258 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1259 pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1260 rcu_torture_current,
1261 rcu_torture_current_version,
1262 list_empty(&rcu_torture_freelist),
1263 atomic_read(&n_rcu_torture_alloc),
1264 atomic_read(&n_rcu_torture_alloc_fail),
1265 atomic_read(&n_rcu_torture_free));
1266 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
1267 atomic_read(&n_rcu_torture_mberror),
1268 n_rcu_torture_boost_ktrerror,
1269 n_rcu_torture_boost_rterror);
1270 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1271 n_rcu_torture_boost_failure,
1272 n_rcu_torture_boosts,
1273 n_rcu_torture_timers);
1274 torture_onoff_stats();
1275 pr_cont("barrier: %ld/%ld:%ld ",
1276 n_barrier_successes,
1277 n_barrier_attempts,
1278 n_rcu_torture_barrier_error);
1279 pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
1280
1281 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1282 if (atomic_read(&n_rcu_torture_mberror) != 0 ||
1283 n_rcu_torture_barrier_error != 0 ||
1284 n_rcu_torture_boost_ktrerror != 0 ||
1285 n_rcu_torture_boost_rterror != 0 ||
1286 n_rcu_torture_boost_failure != 0 ||
1287 i > 1) {
1288 pr_cont("%s", "!!! ");
1289 atomic_inc(&n_rcu_torture_error);
1290 WARN_ON_ONCE(1);
1291 }
1292 pr_cont("Reader Pipe: ");
1293 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1294 pr_cont(" %ld", pipesummary[i]);
1295 pr_cont("\n");
1296
1297 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1298 pr_cont("Reader Batch: ");
1299 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1300 pr_cont(" %ld", batchsummary[i]);
1301 pr_cont("\n");
1302
1303 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1304 pr_cont("Free-Block Circulation: ");
1305 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1306 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
1307 }
1308 pr_cont("\n");
1309
1310 if (cur_ops->stats)
1311 cur_ops->stats();
1312 if (rtcv_snap == rcu_torture_current_version &&
1313 rcu_torture_current != NULL) {
1314 int __maybe_unused flags;
1315 unsigned long __maybe_unused gpnum;
1316 unsigned long __maybe_unused completed;
1317
1318 rcutorture_get_gp_data(cur_ops->ttype,
1319 &flags, &gpnum, &completed);
1320 pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x\n",
1321 rcu_torture_writer_state_getname(),
1322 rcu_torture_writer_state,
1323 gpnum, completed, flags);
1324 show_rcu_gp_kthreads();
1325 rcu_ftrace_dump(DUMP_ALL);
1326 }
1327 rtcv_snap = rcu_torture_current_version;
1328 }
1329
1330 /*
1331 * Periodically prints torture statistics, if periodic statistics printing
1332 * was specified via the stat_interval module parameter.
1333 */
1334 static int
1335 rcu_torture_stats(void *arg)
1336 {
1337 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
1338 do {
1339 schedule_timeout_interruptible(stat_interval * HZ);
1340 rcu_torture_stats_print();
1341 torture_shutdown_absorb("rcu_torture_stats");
1342 } while (!torture_must_stop());
1343 torture_kthread_stopping("rcu_torture_stats");
1344 return 0;
1345 }
1346
1347 static inline void
1348 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
1349 {
1350 pr_alert("%s" TORTURE_FLAG
1351 "--- %s: nreaders=%d nfakewriters=%d "
1352 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1353 "shuffle_interval=%d stutter=%d irqreader=%d "
1354 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1355 "test_boost=%d/%d test_boost_interval=%d "
1356 "test_boost_duration=%d shutdown_secs=%d "
1357 "stall_cpu=%d stall_cpu_holdoff=%d "
1358 "n_barrier_cbs=%d "
1359 "onoff_interval=%d onoff_holdoff=%d\n",
1360 torture_type, tag, nrealreaders, nfakewriters,
1361 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1362 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1363 test_boost, cur_ops->can_boost,
1364 test_boost_interval, test_boost_duration, shutdown_secs,
1365 stall_cpu, stall_cpu_holdoff,
1366 n_barrier_cbs,
1367 onoff_interval, onoff_holdoff);
1368 }
1369
1370 static void rcutorture_booster_cleanup(int cpu)
1371 {
1372 struct task_struct *t;
1373
1374 if (boost_tasks[cpu] == NULL)
1375 return;
1376 mutex_lock(&boost_mutex);
1377 t = boost_tasks[cpu];
1378 boost_tasks[cpu] = NULL;
1379 mutex_unlock(&boost_mutex);
1380
1381 /* This must be outside of the mutex, otherwise deadlock! */
1382 torture_stop_kthread(rcu_torture_boost, t);
1383 }
1384
1385 static int rcutorture_booster_init(int cpu)
1386 {
1387 int retval;
1388
1389 if (boost_tasks[cpu] != NULL)
1390 return 0; /* Already created, nothing more to do. */
1391
1392 /* Don't allow time recalculation while creating a new task. */
1393 mutex_lock(&boost_mutex);
1394 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
1395 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1396 cpu_to_node(cpu),
1397 "rcu_torture_boost");
1398 if (IS_ERR(boost_tasks[cpu])) {
1399 retval = PTR_ERR(boost_tasks[cpu]);
1400 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
1401 n_rcu_torture_boost_ktrerror++;
1402 boost_tasks[cpu] = NULL;
1403 mutex_unlock(&boost_mutex);
1404 return retval;
1405 }
1406 kthread_bind(boost_tasks[cpu], cpu);
1407 wake_up_process(boost_tasks[cpu]);
1408 mutex_unlock(&boost_mutex);
1409 return 0;
1410 }
1411
1412 /*
1413 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
1414 * induces a CPU stall for the time specified by stall_cpu.
1415 */
1416 static int rcu_torture_stall(void *args)
1417 {
1418 unsigned long stop_at;
1419
1420 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
1421 if (stall_cpu_holdoff > 0) {
1422 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
1423 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
1424 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
1425 }
1426 if (!kthread_should_stop()) {
1427 stop_at = get_seconds() + stall_cpu;
1428 /* RCU CPU stall is expected behavior in following code. */
1429 pr_alert("rcu_torture_stall start.\n");
1430 rcu_read_lock();
1431 preempt_disable();
1432 while (ULONG_CMP_LT(get_seconds(), stop_at))
1433 continue; /* Induce RCU CPU stall warning. */
1434 preempt_enable();
1435 rcu_read_unlock();
1436 pr_alert("rcu_torture_stall end.\n");
1437 }
1438 torture_shutdown_absorb("rcu_torture_stall");
1439 while (!kthread_should_stop())
1440 schedule_timeout_interruptible(10 * HZ);
1441 return 0;
1442 }
1443
1444 /* Spawn CPU-stall kthread, if stall_cpu specified. */
1445 static int __init rcu_torture_stall_init(void)
1446 {
1447 if (stall_cpu <= 0)
1448 return 0;
1449 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
1450 }
1451
1452 /* Callback function for RCU barrier testing. */
1453 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
1454 {
1455 atomic_inc(&barrier_cbs_invoked);
1456 }
1457
1458 /* kthread function to register callbacks used to test RCU barriers. */
1459 static int rcu_torture_barrier_cbs(void *arg)
1460 {
1461 long myid = (long)arg;
1462 	bool lastphase = false;
1463 bool newphase;
1464 struct rcu_head rcu;
1465
1466 init_rcu_head_on_stack(&rcu);
1467 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
1468 set_user_nice(current, MAX_NICE);
1469 do {
1470 wait_event(barrier_cbs_wq[myid],
1471 (newphase =
1472 smp_load_acquire(&barrier_phase)) != lastphase ||
1473 torture_must_stop());
1474 lastphase = newphase;
1475 if (torture_must_stop())
1476 break;
1477 /*
1478 * The above smp_load_acquire() ensures barrier_phase load
1479 		 * is ordered before the following ->call().
1480 */
1481 local_irq_disable(); /* Just to test no-irq call_rcu(). */
1482 cur_ops->call(&rcu, rcu_torture_barrier_cbf);
1483 local_irq_enable();
1484 if (atomic_dec_and_test(&barrier_cbs_count))
1485 wake_up(&barrier_wq);
1486 } while (!torture_must_stop());
1487 if (cur_ops->cb_barrier != NULL)
1488 cur_ops->cb_barrier();
1489 destroy_rcu_head_on_stack(&rcu);
1490 torture_kthread_stopping("rcu_torture_barrier_cbs");
1491 return 0;
1492 }
1493
1494 /* kthread function to drive and coordinate RCU barrier testing. */
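/*
 * Protocol sketch: the driver flips barrier_phase, releasing each
 * rcu_torture_barrier_cbs() kthread to post exactly one callback.
 * When barrier_cbs_count drops to zero (all callbacks posted), the
 * driver invokes cur_ops->cb_barrier() and then checks that every
 * posted callback actually ran; barrier_cbs_invoked != n_barrier_cbs
 * means the barrier returned before all callbacks were invoked.
 */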
1495 static int rcu_torture_barrier(void *arg)
1496 {
1497 int i;
1498
1499 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
1500 do {
1501 atomic_set(&barrier_cbs_invoked, 0);
1502 atomic_set(&barrier_cbs_count, n_barrier_cbs);
1503 /* Ensure barrier_phase ordered after prior assignments. */
1504 smp_store_release(&barrier_phase, !barrier_phase);
1505 for (i = 0; i < n_barrier_cbs; i++)
1506 wake_up(&barrier_cbs_wq[i]);
1507 wait_event(barrier_wq,
1508 atomic_read(&barrier_cbs_count) == 0 ||
1509 torture_must_stop());
1510 if (torture_must_stop())
1511 break;
1512 n_barrier_attempts++;
1513 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
1514 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
1515 n_rcu_torture_barrier_error++;
1516 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
1517 atomic_read(&barrier_cbs_invoked),
1518 n_barrier_cbs);
1519 WARN_ON_ONCE(1);
1520 }
1521 n_barrier_successes++;
1522 schedule_timeout_interruptible(HZ / 10);
1523 } while (!torture_must_stop());
1524 torture_kthread_stopping("rcu_torture_barrier");
1525 return 0;
1526 }
1527
1528 /* Initialize RCU barrier testing. */
1529 static int rcu_torture_barrier_init(void)
1530 {
1531 int i;
1532 int ret;
1533
1534 if (n_barrier_cbs <= 0)
1535 return 0;
1536 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
1537 pr_alert("%s" TORTURE_FLAG
1538 " Call or barrier ops missing for %s,\n",
1539 torture_type, cur_ops->name);
1540 pr_alert("%s" TORTURE_FLAG
1541 " RCU barrier testing omitted from run.\n",
1542 torture_type);
1543 return 0;
1544 }
1545 atomic_set(&barrier_cbs_count, 0);
1546 atomic_set(&barrier_cbs_invoked, 0);
1547 barrier_cbs_tasks =
1548 kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
1549 GFP_KERNEL);
1550 barrier_cbs_wq =
1551 kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
1552 GFP_KERNEL);
1553 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
1554 return -ENOMEM;
1555 for (i = 0; i < n_barrier_cbs; i++) {
1556 init_waitqueue_head(&barrier_cbs_wq[i]);
1557 ret = torture_create_kthread(rcu_torture_barrier_cbs,
1558 (void *)(long)i,
1559 barrier_cbs_tasks[i]);
1560 if (ret)
1561 return ret;
1562 }
1563 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
1564 }
1565
1566 /* Clean up after RCU barrier testing. */
1567 static void rcu_torture_barrier_cleanup(void)
1568 {
1569 int i;
1570
1571 torture_stop_kthread(rcu_torture_barrier, barrier_task);
1572 if (barrier_cbs_tasks != NULL) {
1573 for (i = 0; i < n_barrier_cbs; i++)
1574 torture_stop_kthread(rcu_torture_barrier_cbs,
1575 barrier_cbs_tasks[i]);
1576 kfree(barrier_cbs_tasks);
1577 barrier_cbs_tasks = NULL;
1578 }
1579 if (barrier_cbs_wq != NULL) {
1580 kfree(barrier_cbs_wq);
1581 barrier_cbs_wq = NULL;
1582 }
1583 }
1584
1585 static int rcutorture_cpu_notify(struct notifier_block *self,
1586 unsigned long action, void *hcpu)
1587 {
1588 long cpu = (long)hcpu;
1589
1590 switch (action & ~CPU_TASKS_FROZEN) {
1591 case CPU_ONLINE:
1592 case CPU_DOWN_FAILED:
1593 (void)rcutorture_booster_init(cpu);
1594 break;
1595 case CPU_DOWN_PREPARE:
1596 rcutorture_booster_cleanup(cpu);
1597 break;
1598 default:
1599 break;
1600 }
1601 return NOTIFY_OK;
1602 }
1603
1604 static struct notifier_block rcutorture_cpu_nb = {
1605 .notifier_call = rcutorture_cpu_notify,
1606 };
1607
1608 static void
1609 rcu_torture_cleanup(void)
1610 {
1611 int i;
1612
1613 rcutorture_record_test_transition();
1614 if (torture_cleanup_begin()) {
1615 if (cur_ops->cb_barrier != NULL)
1616 cur_ops->cb_barrier();
1617 return;
1618 }
1619
1620 rcu_torture_barrier_cleanup();
1621 torture_stop_kthread(rcu_torture_stall, stall_task);
1622 torture_stop_kthread(rcu_torture_writer, writer_task);
1623
1624 if (reader_tasks) {
1625 for (i = 0; i < nrealreaders; i++)
1626 torture_stop_kthread(rcu_torture_reader,
1627 reader_tasks[i]);
1628 kfree(reader_tasks);
1629 }
1630 rcu_torture_current = NULL;
1631
1632 if (fakewriter_tasks) {
1633 for (i = 0; i < nfakewriters; i++) {
1634 torture_stop_kthread(rcu_torture_fakewriter,
1635 fakewriter_tasks[i]);
1636 }
1637 kfree(fakewriter_tasks);
1638 fakewriter_tasks = NULL;
1639 }
1640
1641 torture_stop_kthread(rcu_torture_stats, stats_task);
1642 torture_stop_kthread(rcu_torture_fqs, fqs_task);
1643 for (i = 0; i < ncbflooders; i++)
1644 torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
1645 if ((test_boost == 1 && cur_ops->can_boost) ||
1646 test_boost == 2) {
1647 unregister_cpu_notifier(&rcutorture_cpu_nb);
1648 for_each_possible_cpu(i)
1649 rcutorture_booster_cleanup(i);
1650 }
1651
1652 /*
1653 * Wait for all RCU callbacks to fire, then do flavor-specific
1654 * cleanup operations.
1655 */
1656 if (cur_ops->cb_barrier != NULL)
1657 cur_ops->cb_barrier();
1658 if (cur_ops->cleanup != NULL)
1659 cur_ops->cleanup();
1660
1661 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
1662
1663 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
1664 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
1665 else if (torture_onoff_failures())
1666 rcu_torture_print_module_parms(cur_ops,
1667 "End of test: RCU_HOTPLUG");
1668 else
1669 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
1670 torture_cleanup_end();
1671 }
1672
1673 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
1674 static void rcu_torture_leak_cb(struct rcu_head *rhp)
1675 {
1676 }
1677
1678 static void rcu_torture_err_cb(struct rcu_head *rhp)
1679 {
1680 /*
1681 * This -might- happen due to race conditions, but is unlikely.
1682 * The scenario that leads to this happening is that the
1683 * first of the pair of duplicate callbacks is queued,
1684 * someone else starts a grace period that includes that
1685 * callback, then the second of the pair must wait for the
1686 * next grace period. Unlikely, but can happen. If it
1687 * does happen, the debug-objects subsystem won't have splatted.
1688 */
1689 pr_alert("rcutorture: duplicated callback was invoked.\n");
1690 }
1691 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1692
1693 /*
1694 * Verify that double-free causes debug-objects to complain, but only
1695 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
1696 * cannot be carried out.
1697 */
1698 static void rcu_test_debug_objects(void)
1699 {
1700 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
1701 struct rcu_head rh1;
1702 struct rcu_head rh2;
1703
1704 init_rcu_head_on_stack(&rh1);
1705 init_rcu_head_on_stack(&rh2);
1706 pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");
1707
1708 /* Try to queue the rh2 pair of callbacks for the same grace period. */
1709 preempt_disable(); /* Prevent preemption from interrupting test. */
1710 rcu_read_lock(); /* Make it impossible to finish a grace period. */
1711 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
1712 local_irq_disable(); /* Make it harder to start a new grace period. */
1713 call_rcu(&rh2, rcu_torture_leak_cb);
1714 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
1715 local_irq_enable();
1716 rcu_read_unlock();
1717 preempt_enable();
1718
1719 /* Wait for them all to get done so we can safely return. */
1720 rcu_barrier();
1721 pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
1722 destroy_rcu_head_on_stack(&rh1);
1723 destroy_rcu_head_on_stack(&rh2);
1724 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1725 pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
1726 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
1727 }
1728
1729 static int __init
1730 rcu_torture_init(void)
1731 {
1732 int i;
1733 int cpu;
1734 int firsterr = 0;
1735 static struct rcu_torture_ops *torture_ops[] = {
1736 &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
1737 &sched_ops, RCUTORTURE_TASKS_OPS
1738 };
1739
1740 if (!torture_init_begin(torture_type, verbose, &torture_runnable))
1741 return -EBUSY;
1742
1743 /* Process args and tell the world that the torturer is on the job. */
1744 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
1745 cur_ops = torture_ops[i];
1746 if (strcmp(torture_type, cur_ops->name) == 0)
1747 break;
1748 }
1749 if (i == ARRAY_SIZE(torture_ops)) {
1750 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
1751 torture_type);
1752 pr_alert("rcu-torture types:");
1753 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
1754 pr_alert(" %s", torture_ops[i]->name);
1755 pr_alert("\n");
1756 firsterr = -EINVAL;
1757 goto unwind;
1758 }
1759 if (cur_ops->fqs == NULL && fqs_duration != 0) {
1760 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
1761 fqs_duration = 0;
1762 }
1763 if (cur_ops->init)
1764 cur_ops->init();
1765
1766 if (nreaders >= 0) {
1767 nrealreaders = nreaders;
1768 } else {
1769 nrealreaders = num_online_cpus() - 2 - nreaders;
1770 if (nrealreaders <= 0)
1771 nrealreaders = 1;
1772 }
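	/*
	 * With the default nreaders of -1, the else-branch arithmetic
	 * yields num_online_cpus() - 2 - (-1) = num_online_cpus() - 1
	 * readers, leaving a CPU for the writer and clamping below at
	 * one reader.
	 */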
1773 rcu_torture_print_module_parms(cur_ops, "Start of test");
1774
1775 /* Set up the freelist. */
1776
1777 INIT_LIST_HEAD(&rcu_torture_freelist);
1778 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
1779 rcu_tortures[i].rtort_mbtest = 0;
1780 list_add_tail(&rcu_tortures[i].rtort_free,
1781 &rcu_torture_freelist);
1782 }
1783
1784 /* Initialize the statistics so that each run gets its own numbers. */
1785
1786 rcu_torture_current = NULL;
1787 rcu_torture_current_version = 0;
1788 atomic_set(&n_rcu_torture_alloc, 0);
1789 atomic_set(&n_rcu_torture_alloc_fail, 0);
1790 atomic_set(&n_rcu_torture_free, 0);
1791 atomic_set(&n_rcu_torture_mberror, 0);
1792 atomic_set(&n_rcu_torture_error, 0);
1793 n_rcu_torture_barrier_error = 0;
1794 n_rcu_torture_boost_ktrerror = 0;
1795 n_rcu_torture_boost_rterror = 0;
1796 n_rcu_torture_boost_failure = 0;
1797 n_rcu_torture_boosts = 0;
1798 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1799 atomic_set(&rcu_torture_wcount[i], 0);
1800 for_each_possible_cpu(cpu) {
1801 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1802 per_cpu(rcu_torture_count, cpu)[i] = 0;
1803 per_cpu(rcu_torture_batch, cpu)[i] = 0;
1804 }
1805 }
1806
1807 /* Start up the kthreads. */
1808
1809 firsterr = torture_create_kthread(rcu_torture_writer, NULL,
1810 writer_task);
1811 if (firsterr)
1812 goto unwind;
1813 if (nfakewriters > 0) {
1814 fakewriter_tasks = kzalloc(nfakewriters *
1815 sizeof(fakewriter_tasks[0]),
1816 GFP_KERNEL);
1817 if (fakewriter_tasks == NULL) {
1818 VERBOSE_TOROUT_ERRSTRING("out of memory");
1819 firsterr = -ENOMEM;
1820 goto unwind;
1821 }
1822 }
1823 for (i = 0; i < nfakewriters; i++) {
1824 firsterr = torture_create_kthread(rcu_torture_fakewriter,
1825 NULL, fakewriter_tasks[i]);
1826 if (firsterr)
1827 goto unwind;
1828 }
1829 reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
1830 GFP_KERNEL);
1831 if (reader_tasks == NULL) {
1832 VERBOSE_TOROUT_ERRSTRING("out of memory");
1833 firsterr = -ENOMEM;
1834 goto unwind;
1835 }
1836 for (i = 0; i < nrealreaders; i++) {
1837 firsterr = torture_create_kthread(rcu_torture_reader, NULL,
1838 reader_tasks[i]);
1839 if (firsterr)
1840 goto unwind;
1841 }
1842 if (stat_interval > 0) {
1843 firsterr = torture_create_kthread(rcu_torture_stats, NULL,
1844 stats_task);
1845 if (firsterr)
1846 goto unwind;
1847 }
1848 if (test_no_idle_hz && shuffle_interval > 0) {
1849 firsterr = torture_shuffle_init(shuffle_interval * HZ);
1850 if (firsterr)
1851 goto unwind;
1852 }
1853 if (stutter < 0)
1854 stutter = 0;
1855 if (stutter) {
1856 firsterr = torture_stutter_init(stutter * HZ);
1857 if (firsterr)
1858 goto unwind;
1859 }
1860 if (fqs_duration < 0)
1861 fqs_duration = 0;
1862 if (fqs_duration) {
1863 /* Create the fqs thread */
1864 firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
1865 fqs_task);
1866 if (firsterr)
1867 goto unwind;
1868 }
1869 if (test_boost_interval < 1)
1870 test_boost_interval = 1;
1871 if (test_boost_duration < 2)
1872 test_boost_duration = 2;
1873 if ((test_boost == 1 && cur_ops->can_boost) ||
1874 test_boost == 2) {
1875
1876 boost_starttime = jiffies + test_boost_interval * HZ;
1877 register_cpu_notifier(&rcutorture_cpu_nb);
1878 for_each_possible_cpu(i) {
1879 if (cpu_is_offline(i))
1880 continue; /* Heuristic: CPU can go offline. */
1881 firsterr = rcutorture_booster_init(i);
1882 if (firsterr)
1883 goto unwind;
1884 }
1885 }
1886 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
1887 if (firsterr)
1888 goto unwind;
1889 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
1890 if (firsterr)
1891 goto unwind;
1892 firsterr = rcu_torture_stall_init();
1893 if (firsterr)
1894 goto unwind;
1895 firsterr = rcu_torture_barrier_init();
1896 if (firsterr)
1897 goto unwind;
1898 if (object_debug)
1899 rcu_test_debug_objects();
1900 if (cbflood_n_burst > 0) {
1901 /* Create the cbflood threads */
1902 ncbflooders = (num_online_cpus() + 3) / 4;
1903 cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
1904 GFP_KERNEL);
1905 if (!cbflood_task) {
1906 VERBOSE_TOROUT_ERRSTRING("out of memory");
1907 firsterr = -ENOMEM;
1908 goto unwind;
1909 }
1910 for (i = 0; i < ncbflooders; i++) {
1911 firsterr = torture_create_kthread(rcu_torture_cbflood,
1912 NULL,
1913 cbflood_task[i]);
1914 if (firsterr)
1915 goto unwind;
1916 }
1917 }
1918 rcutorture_record_test_transition();
1919 torture_init_end();
1920 return 0;
1921
1922 unwind:
1923 torture_init_end();
1924 rcu_torture_cleanup();
1925 return firsterr;
1926 }
1927
1928 module_init(rcu_torture_init);
1929 module_exit(rcu_torture_cleanup);