Merge branch 'bp-remove-pc-buf' into for-next
[deliverable/linux.git] kernel/trace/trace_selftest.c
/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_SPECIAL:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
        case TRACE_HW_BRANCHES:
                return 1;
        }
        return 0;
}

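/*
 * Consume every event queued on one CPU of the trace buffer and make sure
 * each entry is of a type the selftests can produce.  Looping more than
 * trace_buf_size times means the buffer is being refilled while we drain
 * it, which is treated as corruption.
 */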
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer is of size trace_buf_size; if we loop
                 * more times than that, there's something wrong with the
                 * ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        __raw_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(tr->buffer);

        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(tr, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        __raw_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* passed in as a parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
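        /*
         * For example (an assumption, not spelled out here): on 64-bit
         * PowerPC ABIs that use function descriptors, the entry symbol
         * may show up as ".DYN_FTRACE_TEST_NAME"; the leading '*' glob
         * below still matches it.
         */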
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(tr, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_filter(NULL, 0, 1);

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        int ret;

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

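/*
 * A rough sanity bound (an assumption about the intent of the value): the
 * 100ms test window below cannot legitimately produce anywhere near this
 * many graph entries, so crossing it in the watchdog is taken to mean the
 * tracer is stuck in a loop.
 */
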
static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops)
                        __ftrace_dump(false);
                return 0;
        }

        return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which this
 * one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
{
        int ret;
        unsigned long count;

        /*
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
        tracing_reset_online_cpus(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                tracing_selftest_disabled = true;
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

 out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
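/*
 * The combined test below exercises the irqs+preempt off path twice: once
 * right after the tracer is initialized and once more after tracing is
 * restarted, verifying both the live buffer and max_tr each time.
 */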
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs/preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

 out:
        tracing_start();
 out_no_start:
        trace->reset(tr);
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
        /* Make this a RT thread, doesn't need to be too high */
        struct sched_param param = { .sched_priority = 5 };
        struct completion *x = data;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* Make it known we have a new prio */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * others run.
                 */
                msleep(100);
        }

        return 0;
}

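/*
 * The wakeup selftest: create the RT thread above, let it go to sleep,
 * then wake it while the wakeup tracer is active.  The resulting wakeup
 * latency should land in max_tr, which is what gets checked below.
 */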
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;
        unsigned long count;
        int ret;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* sleep to let the RT thread sleep too */
        msleep(100);

        /*
         * Yes this is slightly racy. It is possible that for some
         * strange reason that the RT thread we created, did not
         * call schedule for 100ms after doing the completion,
         * and we do a wakeup on a task that already is awake.
         * But that is extremely unlikely, and the worst thing that
         * happens in such a case, is that we disable tracing.
         * Honestly, if this race does happen something is horribly
         * wrong with the system.
         */

        wake_up_process(p);

        /* give a little time to let the thread wake up */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);

        trace->reset(tr);
        tracing_start();

        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

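/*
 * The remaining selftests (sched_switch, sysprof, branch) all follow the
 * same simple pattern: initialize the tracer, sleep through 100ms of
 * normal system activity, stop tracing, and fail if the buffer stayed
 * empty.
 */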
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
                                   struct trace_array *tr)
{
        struct trace_iterator *iter;
        struct tracer tracer;
        unsigned long count;
        int ret;

        if (!trace->open) {
                printk(KERN_CONT "missing open function...");
                return -1;
        }

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /*
         * The hw-branch tracer needs to collect the trace from the various
         * cpu trace buffers - before tracing is stopped.
         */
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

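        /*
         * A throwaway iterator is enough for trace->open() here; the
         * tracer is memcpy'd into a local copy so that (presumably) the
         * open/close cycle does not touch the global tracer instance used
         * elsewhere.
         */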
        memcpy(&tracer, trace, sizeof(tracer));

        iter->trace = &tracer;
        iter->tr = tr;
        iter->pos = -1;
        mutex_init(&iter->mutex);

        trace->open(iter);

        mutex_destroy(&iter->mutex);
        kfree(iter);

        tracing_stop();

        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT "no entries found..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */