/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

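/*
 * Drain every event from one CPU's ring buffer and check that each
 * entry has a type the selftests recognize; the loop counter guards
 * against a corrupted buffer that never runs dry.
 */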
static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

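/*
 * These ops are marked FTRACE_OPS_FL_RECURSION_SAFE: their callbacks
 * only increment a counter and cannot recurse into the function
 * tracer, so the ftrace core may skip its own recursion protection
 * when invoking them.
 */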
static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

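/*
 * Register three filtered ftrace_ops (plus, when cnt > 1, the trace
 * array's own ops and a dynamically allocated one), call the two
 * exported test functions, and verify that each callback fired
 * exactly the number of times its filter allows.
 */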
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		/* must use out_free here: the dynamic ops is registered */
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection.
	 * By calling this function again, we should recurse back
	 * into this function and count again. But this only happens
	 * if the arch supports all of the ftrace features and nothing
	 * else is using the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

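/*
 * Exercise both recursion modes: the unprotected callback must be
 * caught by ftrace's recursion protection (called exactly once),
 * while the RECURSION_SAFE callback is allowed to recurse into
 * itself one extra time (called exactly twice).
 */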
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

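/*
 * Verify FTRACE_OPS_FL_SAVE_REGS semantics: with arch support the
 * callback must receive a non-NULL pt_regs; without it, plain
 * SAVE_REGS registration must fail, and the SAVE_REGS_IF_SUPPORTED
 * fallback must call the callback with NULL regs.
 */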
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
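/*
 * The latency selftests below share one pattern: reset max_latency,
 * open a ~100us critical section with udelay(), then check that the
 * max buffer captured a non-empty snapshot of it.
 */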
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
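/*
 * Combined irqs/preempt-off test: the critical section is closed
 * with preempt_enable() while interrupts are still disabled, so both
 * the preempt-off and irqs-off trackers record a latency; the whole
 * sequence is then repeated on a restarted tracer.
 */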
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct completion *x = data;

	sched_setattr(current, &attr);

	/* Let the caller know we are now running at the new policy */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	complete(x);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This will likely be the system's top priority
		 * task; do short sleeps to let others run.
		 */
		msleep(100);
	}

	return 0;
}

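/*
 * The wakeup test relies on the -deadline policy: SCHED_DEADLINE
 * tasks are favored over every other scheduling class, so waking the
 * test thread should preempt whatever else is running and give the
 * tracer a wakeup latency to record.
 */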
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct completion is_ready;
	unsigned long count;
	int ret;

	init_completion(&is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&is_ready);

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */