kernel/trace/trace_selftest.c
/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

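/*
 * These selftests are compiled straight into trace.c (hence the
 * comment above) and are run from register_tracer() at boot when
 * CONFIG_FTRACE_STARTUP_TEST is enabled.  A tracer whose selftest
 * fails is not registered.
 */
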
static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_SPECIAL:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
        case TRACE_HW_BRANCHES:
                return 1;
        }
        return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size entries;
                 * if we loop more times than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        __raw_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(tr->buffer);

        /*
         * trace_test_buffer_cpu() runs a while loop to consume all data.
         * If the calling tracer is broken and keeps filling the buffer,
         * this would run forever and hard-lock the box.  We disable the
         * ring buffer while we do this test to prevent a hard lockup.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(tr, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        __raw_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}
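
/*
 * Every selftest below follows roughly the same shape (a sketch of the
 * common pattern, assembled from the functions in this file):
 *
 *	ret = tracer_init(trace, tr);	// start the tracer
 *	...exercise the traced path...
 *	tracing_stop();			// freeze the buffers
 *	ret = trace_test_buffer(tr, &count);
 *	trace->reset(tr);		// tear the tracer down
 *	tracing_start();
 *
 * trace_test_buffer() both validates the entries and reports how many
 * were recorded, so a tracer that produced no entries also fails.
 */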

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
}
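
/*
 * Illustrative sketch (not part of the original file): how a tracer
 * opts in to these tests.  The "my_*" names are hypothetical; the
 * field names follow struct tracer in trace.h:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name		= "mine",
 *		.init		= my_tracer_init,
 *		.reset		= my_tracer_reset,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 *
 * register_tracer() runs the ->selftest() callback and refuses to
 * register the tracer if it returns non-zero.
 */
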
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* passed in as a parameter to keep gcc from optimizing the call away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
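        /*
         * With the DYN_FTRACE_TEST_NAME definition in trace.h this
         * typically expands to "*trace_selftest_dynamic_test_func".
         */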
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(tr, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_filter(NULL, 0, 1);

        return ret;
}
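
/*
 * A note on ftrace_set_filter(buf, len, reset) as used above: a
 * non-zero 'reset' clears any previously installed filter before
 * 'buf' is parsed, so the NULL/0/1 call on the way out simply
 * re-enables tracing of all functions.
 */
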
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 of a second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        int ret;

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid a possible hang */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy; we only want to roughly detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops)
                        __ftrace_dump(false);
                return 0;
        }

        return trace_graph_entry(trace);
}
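
/*
 * Note the argument order below: register_ftrace_graph() takes the
 * return probe first and the entry probe second, which is how the
 * watchdog above slots in in place of plain trace_graph_entry().
 */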

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
{
        int ret;
        unsigned long count;

        /*
         * Simulate the init() callback, but attach a watchdog entry
         * probe so we can detect and recover from a possible hang.
         */
        tracing_reset_online_cpus(tr);
        set_graph_array(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                tracing_selftest_disabled = true;
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

 out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
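/*
 * The latency tracers below keep their worst-case trace in the
 * separate max_tr array: when a new maximum latency is recorded, the
 * live buffer is swapped ("flipped") into max_tr.  That is why these
 * tests check both tr and max_tr, and why trace_test_buffer() takes
 * ftrace_max_lock to hold off a flip while it consumes entries.
 */
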
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer first to avoid a warning about a failed
         * buffer flip: tracing_stop() disables both the tr and max
         * buffers, which makes flipping impossible should a parallel
         * max irqs-off latency come in.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer first to avoid a warning about a failed
         * buffer flip: tracing_stop() disables both the tr and max
         * buffers, which makes flipping impossible should a parallel
         * max preempt-off latency come in.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
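/*
 * The combined test below runs the measurement twice, resetting
 * tracing_max_latency in between, to verify that a second maximum
 * is still recorded once the first snapshot has been flipped into
 * max_tr.
 */
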
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer first to avoid a warning about a failed
         * buffer flip: tracing_stop() disables both the tr and max
         * buffers, which makes flipping impossible should a parallel
         * max irqs/preempt-off latency come in.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

 out:
        tracing_start();
 out_no_start:
        trace->reset(tr);
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
        /* Make this an RT thread; it doesn't need to be too high */
        struct sched_param param = { .sched_priority = 5 };
        struct completion *x = data;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* Let the test know we are running at the new prio */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * others run.
                 */
                msleep(100);
        }

        return 0;
}
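
/*
 * The wakeup tracer measures the time from the wakeup of the highest
 * priority runnable task until that task is actually scheduled in.
 * The RT thread above gives this test a predictable subject: wake it
 * while it sleeps and a non-empty max_tr trace must result.
 */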

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;
        unsigned long count;
        int ret;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* sleep to let the RT thread sleep too */
        msleep(100);

        /*
         * Yes, this is slightly racy. It is possible that for some
         * strange reason the RT thread we created did not call
         * schedule for 100ms after doing the completion, and we do
         * a wakeup on a task that is already awake. But that is
         * extremely unlikely, and the worst thing that happens in
         * such a case is that we disable tracing. Honestly, if this
         * race does happen, something is horribly wrong with the
         * system.
         */

        wake_up_process(p);

        /* give a little time to let the thread wake up */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);

        trace->reset(tr);
        tracing_start();

        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
                                   struct trace_array *tr)
{
        struct trace_iterator *iter;
        struct tracer tracer;
        unsigned long count;
        int ret;

        if (!trace->open) {
                printk(KERN_CONT "missing open function...");
                return -1;
        }

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /*
         * The hw-branch tracer needs to collect its trace from the
         * various cpu trace buffers before tracing is stopped.
         */
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        memcpy(&tracer, trace, sizeof(tracer));

        iter->trace = &tracer;
        iter->tr = tr;
        iter->pos = -1;
        mutex_init(&iter->mutex);

        trace->open(iter);

        mutex_destroy(&iter->mutex);
        kfree(iter);

        tracing_stop();

        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT "no entries found..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */