ftrace: support for PowerPC
[deliverable/linux.git] kernel/trace/trace_selftest.c
/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

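/*
 * Only entry types produced by the tracers exercised in these
 * self-tests are considered valid when scanning the buffer.
 */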
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}

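/*
 * Walk one CPU's list of trace pages and check that every recorded
 * entry has a valid type and that the pages end exactly where the
 * entry count says they should.
 */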
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

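/* Stringify helpers used to build the filter string from DYN_FTRACE_TEST_NAME */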
#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	char *func_name;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* func is passed in as a parameter to keep gcc from optimizing the call away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
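/*
 * Check the irqsoff tracer: disable interrupts around a short delay and
 * verify that a latency trace was recorded in the max buffer.
 */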
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
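/*
 * Check the preemptoff tracer: disable preemption around a short delay and
 * verify that a latency trace was recorded in the max buffer.
 */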
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
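/*
 * Check the combined preempt/irqsoff tracer: disable both preemption and
 * interrupts around a short delay, twice, and verify a latency trace each time.
 */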
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
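/*
 * Kernel thread used as the wakeup target: it makes itself SCHED_FIFO,
 * signals that it is ready, then sleeps until the test wakes it up.
 */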
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we have the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

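/*
 * Check the wakeup latency tracer: wake a sleeping RT thread and verify
 * that the wakeup was recorded in the max latency buffer.
 */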
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
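/*
 * Check the sched_switch tracer: trace for 1/10 of a second and verify
 * that context switch entries were recorded.
 */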
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */