/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>
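
/*
 * Each trace_selftest_startup_*() routine below is run as a tracer's
 * ->selftest() callback when that tracer registers (assuming the usual
 * register_tracer() flow in trace.c for this kernel version).
 */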

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
		return 1;
	}
	return 0;
}
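
/*
 * trace_test_buffer_cpu() drains one CPU's ring buffer: each call to
 * ring_buffer_consume() removes the event it returns, so validating
 * the entries also empties the buffer.
 */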

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
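
/*
 * Note: holding ftrace_max_lock with interrupts off below keeps the
 * max-latency buffer swap (update_max_tr() in trace.c) from flipping
 * buffers underneath the checks.
 */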

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);
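
	/*
	 * Take the entry count before the consuming loop below, since
	 * trace_test_buffer_cpu() removes entries as it validates them.
	 */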
	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
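
/*
 * tracer_init() (a helper in trace.c) resets the trace buffers and then
 * calls the tracer's ->init() hook; every selftest below starts through
 * it, so each test begins with an empty trace. warn_failed_init_tracer()
 * just reports when that step fails.
 */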

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
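
/*
 * The two-level macro is deliberate: STR() expands its argument first,
 * so STR(DYN_FTRACE_TEST_NAME) stringifies the macro's expansion rather
 * than the literal token "DYN_FTRACE_TEST_NAME".
 */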

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);
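
	/*
	 * ftrace filters accept simple glob patterns, which is why the
	 * "*" prefix above is enough to match the test function even
	 * when the arch prepends characters to its symbol name.
	 */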

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
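
/*
 * The ({ 0; }) stub above is a GCC statement expression: when dynamic
 * ftrace is not configured, the "test" evaluates to 0 (pass) while
 * still being usable as a drop-in for the real function call.
 */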

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
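
/*
 * For reference, a tracer opts into one of these selftests through its
 * struct tracer definition; a sketch (field names as in this kernel
 * version, cf. kernel/trace/trace_functions.c):
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name     = "function",
 *		.init     = function_trace_init,
 *		.reset    = function_trace_reset,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest = trace_selftest_startup_function,
 *	#endif
 *	};
 */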

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
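	/*
	 * max_tr holds the snapshot that the latency tracer saved of its
	 * worst-case trace; the entry count is taken from there, while
	 * the live buffer (tr) is only checked for sanity.
	 */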
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();
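
	/*
	 * Note the overlap above: preemption is re-enabled while
	 * interrupts are still off, so the preempt-off and irqs-off
	 * sections end at different points and the tracer has to
	 * account for both.
	 */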

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test again, after restarting tracing with a fresh max latency */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we are running at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
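
/*
 * The wakeup test: create an RT thread, let it go to sleep, then wake
 * it and check that the wakeup tracer recorded the wakeup latency of
 * the higher-priority task into max_tr.
 */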

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst that happens in such a
	 * case is that we disable tracing. Honestly, if this race
	 * does happen, something is horribly wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return 0;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */