/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
		return 1;
	}
	return 0;
}
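
/*
 * Any entry type missing from the switch above is treated as buffer
 * corruption by trace_test_buffer_cpu() below, which disables tracing
 * outright; new TRACE_* types must be added here for the startup
 * selftests to keep passing.
 */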

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
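
/*
 * Note that ring_buffer_consume() removes each event from the ring
 * buffer as it hands it back, so a successful pass leaves the cpu
 * buffer empty; trace_test_buffer() below therefore counts the
 * entries with ring_buffer_entries() before walking the cpus.
 */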

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
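
/*
 * Each selftest below follows the same basic shape.  A condensed
 * sketch, using only calls that appear in this file:
 *
 *	ret = trace->init(tr);			start the tracer under test
 *	msleep(100);				let it record something
 *	tracing_stop();				freeze the buffers
 *	ret = trace_test_buffer(tr, &count);	validate and count entries
 *	trace->reset(tr);			tear the tracer down
 *	tracing_start();			unfreeze for the next test
 *
 * A sane-but-empty buffer (!ret && !count) is still reported as a
 * failure by most of the tests.
 */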

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)

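/*
 * Two-level stringification: STR() expands its macro argument first and
 * only then stringifies it via __STR().  Assuming, for illustration,
 * that the header defines
 *
 *	#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
 *
 * STR(DYN_FTRACE_TEST_NAME) yields "trace_selftest_dynamic_test_func",
 * whereas __STR(DYN_FTRACE_TEST_NAME) alone would yield the unexpanded
 * string "DYN_FTRACE_TEST_NAME".
 */
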
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
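
/*
 * A hedged sketch of how these selftests get run (not part of this
 * file): under CONFIG_FTRACE_STARTUP_TEST a tracer points its
 * ->selftest member at one of the routines here, and register_tracer()
 * in trace.c invokes it when the tracer is registered.  Field values
 * other than .selftest below are illustrative:
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */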

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
254 | /* check both trace buffers */ |
255 | ret = trace_test_buffer(tr, NULL); | |
256 | if (!ret) | |
257 | ret = trace_test_buffer(&max_tr, &count); | |
258 | trace->reset(tr); | |
bbf5b1a0 | 259 | tracing_start(); |
60a11774 SR |
260 | |
261 | if (!ret && !count) { | |
262 | printk(KERN_CONT ".. no entries found .."); | |
263 | ret = -1; | |
264 | } | |
265 | ||
266 | tracing_max_latency = save_max; | |
267 | ||
268 | return ret; | |
269 | } | |
270 | #endif /* CONFIG_IRQSOFF_TRACER */ | |

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it known that we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return 0;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */