powerpc: Fix missing 'blr' in _tlbia()
[deliverable/linux.git] / kernel / trace / trace_selftest.c
1 /* Include in trace.c */
2
3 #include <linux/kthread.h>
4 #include <linux/delay.h>
5
6 static inline int trace_valid_entry(struct trace_entry *entry)
7 {
8 switch (entry->type) {
9 case TRACE_FN:
10 case TRACE_CTX:
11 case TRACE_WAKE:
12 case TRACE_CONT:
13 case TRACE_STACK:
14 case TRACE_PRINT:
15 case TRACE_SPECIAL:
16 return 1;
17 }
18 return 0;
19 }
20
21 static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
22 {
23 struct ring_buffer_event *event;
24 struct trace_entry *entry;
25
26 while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
27 entry = ring_buffer_event_data(event);
28
29 if (!trace_valid_entry(entry)) {
30 printk(KERN_CONT ".. invalid entry %d ",
31 entry->type);
32 goto failed;
33 }
34 }
35 return 0;
36
37 failed:
38 /* disable tracing */
39 tracing_disabled = 1;
40 printk(KERN_CONT ".. corrupted trace buffer .. ");
41 return -1;
42 }
43
44 /*
45 * Test the trace buffer to see if all the elements
46 * are still sane.
47 */
48 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
49 {
50 unsigned long flags, cnt = 0;
51 int cpu, ret = 0;
52
53 /* Don't allow flipping of max traces now */
54 raw_local_irq_save(flags);
55 __raw_spin_lock(&ftrace_max_lock);
56
57 cnt = ring_buffer_entries(tr->buffer);
58
59 for_each_possible_cpu(cpu) {
60 ret = trace_test_buffer_cpu(tr, cpu);
61 if (ret)
62 break;
63 }
64 __raw_spin_unlock(&ftrace_max_lock);
65 raw_local_irq_restore(flags);
66
67 if (count)
68 *count = cnt;
69
70 return ret;
71 }
72
73 #ifdef CONFIG_FUNCTION_TRACER
74
75 #ifdef CONFIG_DYNAMIC_FTRACE
76
/* Two-level stringify: STR() expands its argument before quoting it. */
#define __STR(x) #x
#define STR(x) __STR(x)
79
80 /* Test dynamic code modification and ftrace filters */
81 int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
82 struct trace_array *tr,
83 int (*func)(void))
84 {
85 int save_ftrace_enabled = ftrace_enabled;
86 int save_tracer_enabled = tracer_enabled;
87 unsigned long count;
88 char *func_name;
89 int ret;
90
91 /* The ftrace test PASSED */
92 printk(KERN_CONT "PASSED\n");
93 pr_info("Testing dynamic ftrace: ");
94
95 /* enable tracing, and record the filter function */
96 ftrace_enabled = 1;
97 tracer_enabled = 1;
98
99 /* passed in by parameter to fool gcc from optimizing */
100 func();
101
102 /*
103 * Some archs *cough*PowerPC*cough* add charachters to the
104 * start of the function names. We simply put a '*' to
105 * accomodate them.
106 */
107 func_name = "*" STR(DYN_FTRACE_TEST_NAME);
108
109 /* filter only on our function */
110 ftrace_set_filter(func_name, strlen(func_name), 1);
111
112 /* enable tracing */
113 tr->ctrl = 1;
114 trace->init(tr);
115
116 /* Sleep for a 1/10 of a second */
117 msleep(100);
118
119 /* we should have nothing in the buffer */
120 ret = trace_test_buffer(tr, &count);
121 if (ret)
122 goto out;
123
124 if (count) {
125 ret = -1;
126 printk(KERN_CONT ".. filter did not filter .. ");
127 goto out;
128 }
129
130 /* call our function again */
131 func();
132
133 /* sleep again */
134 msleep(100);
135
136 /* stop the tracing. */
137 tr->ctrl = 0;
138 trace->ctrl_update(tr);
139 ftrace_enabled = 0;
140
141 /* check the trace buffer */
142 ret = trace_test_buffer(tr, &count);
143 trace->reset(tr);
144
145 /* we should only have one item */
146 if (!ret && count != 1) {
147 printk(KERN_CONT ".. filter failed count=%ld ..", count);
148 ret = -1;
149 goto out;
150 }
151 out:
152 ftrace_enabled = save_ftrace_enabled;
153 tracer_enabled = save_tracer_enabled;
154
155 /* Enable tracing on all functions again */
156 ftrace_set_filter(NULL, 0, 1);
157
158 return ret;
159 }
160 #else
161 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
162 #endif /* CONFIG_DYNAMIC_FTRACE */
163 /*
164 * Simple verification test of ftrace function tracer.
165 * Enable ftrace, sleep 1/10 second, and then read the trace
166 * buffer to see if all is in order.
167 */
168 int
169 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
170 {
171 int save_ftrace_enabled = ftrace_enabled;
172 int save_tracer_enabled = tracer_enabled;
173 unsigned long count;
174 int ret;
175
176 /* make sure msleep has been recorded */
177 msleep(1);
178
179 /* start the tracing */
180 ftrace_enabled = 1;
181 tracer_enabled = 1;
182
183 tr->ctrl = 1;
184 trace->init(tr);
185 /* Sleep for a 1/10 of a second */
186 msleep(100);
187 /* stop the tracing. */
188 tr->ctrl = 0;
189 trace->ctrl_update(tr);
190 ftrace_enabled = 0;
191
192 /* check the trace buffer */
193 ret = trace_test_buffer(tr, &count);
194 trace->reset(tr);
195
196 if (!ret && !count) {
197 printk(KERN_CONT ".. no entries found ..");
198 ret = -1;
199 goto out;
200 }
201
202 ret = trace_selftest_startup_dynamic_tracing(trace, tr,
203 DYN_FTRACE_TEST_NAME);
204
205 out:
206 ftrace_enabled = save_ftrace_enabled;
207 tracer_enabled = save_tracer_enabled;
208
209 /* kill ftrace totally if we failed */
210 if (ret)
211 ftrace_kill();
212
213 return ret;
214 }
215 #endif /* CONFIG_FUNCTION_TRACER */
216
217 #ifdef CONFIG_IRQSOFF_TRACER
218 int
219 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
220 {
221 unsigned long save_max = tracing_max_latency;
222 unsigned long count;
223 int ret;
224
225 /* start the tracing */
226 tr->ctrl = 1;
227 trace->init(tr);
228 /* reset the max latency */
229 tracing_max_latency = 0;
230 /* disable interrupts for a bit */
231 local_irq_disable();
232 udelay(100);
233 local_irq_enable();
234 /* stop the tracing. */
235 tr->ctrl = 0;
236 trace->ctrl_update(tr);
237 /* check both trace buffers */
238 ret = trace_test_buffer(tr, NULL);
239 if (!ret)
240 ret = trace_test_buffer(&max_tr, &count);
241 trace->reset(tr);
242
243 if (!ret && !count) {
244 printk(KERN_CONT ".. no entries found ..");
245 ret = -1;
246 }
247
248 tracing_max_latency = save_max;
249
250 return ret;
251 }
252 #endif /* CONFIG_IRQSOFF_TRACER */
253
254 #ifdef CONFIG_PREEMPT_TRACER
255 int
256 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
257 {
258 unsigned long save_max = tracing_max_latency;
259 unsigned long count;
260 int ret;
261
262 /* start the tracing */
263 tr->ctrl = 1;
264 trace->init(tr);
265 /* reset the max latency */
266 tracing_max_latency = 0;
267 /* disable preemption for a bit */
268 preempt_disable();
269 udelay(100);
270 preempt_enable();
271 /* stop the tracing. */
272 tr->ctrl = 0;
273 trace->ctrl_update(tr);
274 /* check both trace buffers */
275 ret = trace_test_buffer(tr, NULL);
276 if (!ret)
277 ret = trace_test_buffer(&max_tr, &count);
278 trace->reset(tr);
279
280 if (!ret && !count) {
281 printk(KERN_CONT ".. no entries found ..");
282 ret = -1;
283 }
284
285 tracing_max_latency = save_max;
286
287 return ret;
288 }
289 #endif /* CONFIG_PREEMPT_TRACER */
290
291 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
292 int
293 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
294 {
295 unsigned long save_max = tracing_max_latency;
296 unsigned long count;
297 int ret;
298
299 /* start the tracing */
300 tr->ctrl = 1;
301 trace->init(tr);
302
303 /* reset the max latency */
304 tracing_max_latency = 0;
305
306 /* disable preemption and interrupts for a bit */
307 preempt_disable();
308 local_irq_disable();
309 udelay(100);
310 preempt_enable();
311 /* reverse the order of preempt vs irqs */
312 local_irq_enable();
313
314 /* stop the tracing. */
315 tr->ctrl = 0;
316 trace->ctrl_update(tr);
317 /* check both trace buffers */
318 ret = trace_test_buffer(tr, NULL);
319 if (ret)
320 goto out;
321
322 ret = trace_test_buffer(&max_tr, &count);
323 if (ret)
324 goto out;
325
326 if (!ret && !count) {
327 printk(KERN_CONT ".. no entries found ..");
328 ret = -1;
329 goto out;
330 }
331
332 /* do the test by disabling interrupts first this time */
333 tracing_max_latency = 0;
334 tr->ctrl = 1;
335 trace->ctrl_update(tr);
336 preempt_disable();
337 local_irq_disable();
338 udelay(100);
339 preempt_enable();
340 /* reverse the order of preempt vs irqs */
341 local_irq_enable();
342
343 /* stop the tracing. */
344 tr->ctrl = 0;
345 trace->ctrl_update(tr);
346 /* check both trace buffers */
347 ret = trace_test_buffer(tr, NULL);
348 if (ret)
349 goto out;
350
351 ret = trace_test_buffer(&max_tr, &count);
352
353 if (!ret && !count) {
354 printk(KERN_CONT ".. no entries found ..");
355 ret = -1;
356 goto out;
357 }
358
359 out:
360 trace->reset(tr);
361 tracing_max_latency = save_max;
362
363 return ret;
364 }
365 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
366
367 #ifdef CONFIG_NOP_TRACER
/*
 * The nop tracer has no state to verify; its selftest trivially passes.
 */
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
374 #endif
375
376 #ifdef CONFIG_SCHED_TRACER
377 static int trace_wakeup_test_thread(void *data)
378 {
379 /* Make this a RT thread, doesn't need to be too high */
380 struct sched_param param = { .sched_priority = 5 };
381 struct completion *x = data;
382
383 sched_setscheduler(current, SCHED_FIFO, &param);
384
385 /* Make it know we have a new prio */
386 complete(x);
387
388 /* now go to sleep and let the test wake us up */
389 set_current_state(TASK_INTERRUPTIBLE);
390 schedule();
391
392 /* we are awake, now wait to disappear */
393 while (!kthread_should_stop()) {
394 /*
395 * This is an RT task, do short sleeps to let
396 * others run.
397 */
398 msleep(100);
399 }
400
401 return 0;
402 }
403
404 int
405 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
406 {
407 unsigned long save_max = tracing_max_latency;
408 struct task_struct *p;
409 struct completion isrt;
410 unsigned long count;
411 int ret;
412
413 init_completion(&isrt);
414
415 /* create a high prio thread */
416 p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
417 if (IS_ERR(p)) {
418 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
419 return -1;
420 }
421
422 /* make sure the thread is running at an RT prio */
423 wait_for_completion(&isrt);
424
425 /* start the tracing */
426 tr->ctrl = 1;
427 trace->init(tr);
428 /* reset the max latency */
429 tracing_max_latency = 0;
430
431 /* sleep to let the RT thread sleep too */
432 msleep(100);
433
434 /*
435 * Yes this is slightly racy. It is possible that for some
436 * strange reason that the RT thread we created, did not
437 * call schedule for 100ms after doing the completion,
438 * and we do a wakeup on a task that already is awake.
439 * But that is extremely unlikely, and the worst thing that
440 * happens in such a case, is that we disable tracing.
441 * Honestly, if this race does happen something is horrible
442 * wrong with the system.
443 */
444
445 wake_up_process(p);
446
447 /* give a little time to let the thread wake up */
448 msleep(100);
449
450 /* stop the tracing. */
451 tr->ctrl = 0;
452 trace->ctrl_update(tr);
453 /* check both trace buffers */
454 ret = trace_test_buffer(tr, NULL);
455 if (!ret)
456 ret = trace_test_buffer(&max_tr, &count);
457
458
459 trace->reset(tr);
460
461 tracing_max_latency = save_max;
462
463 /* kill the thread */
464 kthread_stop(p);
465
466 if (!ret && !count) {
467 printk(KERN_CONT ".. no entries found ..");
468 ret = -1;
469 }
470
471 return ret;
472 }
473 #endif /* CONFIG_SCHED_TRACER */
474
475 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
476 int
477 trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
478 {
479 unsigned long count;
480 int ret;
481
482 /* start the tracing */
483 tr->ctrl = 1;
484 trace->init(tr);
485 /* Sleep for a 1/10 of a second */
486 msleep(100);
487 /* stop the tracing. */
488 tr->ctrl = 0;
489 trace->ctrl_update(tr);
490 /* check the trace buffer */
491 ret = trace_test_buffer(tr, &count);
492 trace->reset(tr);
493
494 if (!ret && !count) {
495 printk(KERN_CONT ".. no entries found ..");
496 ret = -1;
497 }
498
499 return ret;
500 }
501 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
502
503 #ifdef CONFIG_SYSPROF_TRACER
504 int
505 trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
506 {
507 unsigned long count;
508 int ret;
509
510 /* start the tracing */
511 tr->ctrl = 1;
512 trace->init(tr);
513 /* Sleep for a 1/10 of a second */
514 msleep(100);
515 /* stop the tracing. */
516 tr->ctrl = 0;
517 trace->ctrl_update(tr);
518 /* check the trace buffer */
519 ret = trace_test_buffer(tr, &count);
520 trace->reset(tr);
521
522 return ret;
523 }
524 #endif /* CONFIG_SYSPROF_TRACER */
This page took 0.042632 seconds and 5 git commands to generate.