ftrace: printk formatting infrastructure
[deliverable/linux.git] / kernel / trace / trace_selftest.c
CommitLineData
60a11774
SR
1/* Include in trace.c */
2
3#include <linux/kthread.h>
c7aafc54 4#include <linux/delay.h>
60a11774 5
e309b41d 6static inline int trace_valid_entry(struct trace_entry *entry)
60a11774
SR
7{
8 switch (entry->type) {
9 case TRACE_FN:
10 case TRACE_CTX:
57422797 11 case TRACE_WAKE:
dd0e545f 12 case TRACE_CONT:
06fa75ab 13 case TRACE_STACK:
dd0e545f 14 case TRACE_PRINT:
06fa75ab 15 case TRACE_SPECIAL:
60a11774
SR
16 return 1;
17 }
18 return 0;
19}
20
/*
 * Validate one CPU's trace buffer: walk every recorded entry across the
 * per-CPU page list and check that each has a recognized entry type.
 * Returns 0 if the buffer looks sane; on any corruption, disables
 * tracing globally and returns -1.
 */
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;		/* index of the current entry within its page */
	int i;

	/* a CPU buffer always owns at least one page */
	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	/* verify the page list itself before walking its contents */
	check_pages(data);
	/* the head page must be the first page on the list */
	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		/* only entries below trace_idx have been written */
		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		/* current page exhausted: advance to the next page in the ring */
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				/* wrapped back to the list head before seeing tr->entries entries */
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	/* after tr->entries steps we must be on the last page of the list */
	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
81
82/*
83 * Test the trace buffer to see if all the elements
84 * are still sane.
85 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(cpu) {
		/* CPUs that never recorded anything have no head page; skip them */
		if (!head_page(tr->data[cpu]))
			continue;

		/* tally this CPU's entries into the aggregate count */
		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	/* hand the total entry count back if the caller asked for it */
	if (count)
		*count = cnt;

	return ret;
}
112
113#ifdef CONFIG_FTRACE
77a2b37d
SR
114
115#ifdef CONFIG_DYNAMIC_FTRACE
116
77a2b37d
SR
/* Two-level stringification: STR(x) expands the macro arg, then makes a string literal */
#define __STR(x) #x
#define STR(x) __STR(x)
77a2b37d
SR
119
120/* Test dynamic code modification and ftrace filters */
121int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
122 struct trace_array *tr,
123 int (*func)(void))
124{
77a2b37d
SR
125 int save_ftrace_enabled = ftrace_enabled;
126 int save_tracer_enabled = tracer_enabled;
dd0e545f 127 unsigned long count;
4e491d14 128 char *func_name;
dd0e545f 129 int ret;
77a2b37d
SR
130
131 /* The ftrace test PASSED */
132 printk(KERN_CONT "PASSED\n");
133 pr_info("Testing dynamic ftrace: ");
134
135 /* enable tracing, and record the filter function */
136 ftrace_enabled = 1;
137 tracer_enabled = 1;
138
139 /* passed in by parameter to fool gcc from optimizing */
140 func();
141
142 /* update the records */
143 ret = ftrace_force_update();
144 if (ret) {
145 printk(KERN_CONT ".. ftraced failed .. ");
146 return ret;
147 }
148
4e491d14
SR
149 /*
150 * Some archs *cough*PowerPC*cough* add charachters to the
151 * start of the function names. We simply put a '*' to
152 * accomodate them.
153 */
154 func_name = "*" STR(DYN_FTRACE_TEST_NAME);
155
77a2b37d 156 /* filter only on our function */
4e491d14 157 ftrace_set_filter(func_name, strlen(func_name), 1);
77a2b37d
SR
158
159 /* enable tracing */
160 tr->ctrl = 1;
161 trace->init(tr);
dd0e545f 162
77a2b37d
SR
163 /* Sleep for a 1/10 of a second */
164 msleep(100);
165
166 /* we should have nothing in the buffer */
167 ret = trace_test_buffer(tr, &count);
168 if (ret)
169 goto out;
170
171 if (count) {
172 ret = -1;
173 printk(KERN_CONT ".. filter did not filter .. ");
174 goto out;
175 }
176
177 /* call our function again */
178 func();
179
180 /* sleep again */
181 msleep(100);
182
183 /* stop the tracing. */
184 tr->ctrl = 0;
185 trace->ctrl_update(tr);
186 ftrace_enabled = 0;
187
188 /* check the trace buffer */
189 ret = trace_test_buffer(tr, &count);
190 trace->reset(tr);
191
192 /* we should only have one item */
193 if (!ret && count != 1) {
06fa75ab 194 printk(KERN_CONT ".. filter failed count=%ld ..", count);
77a2b37d
SR
195 ret = -1;
196 goto out;
197 }
198 out:
199 ftrace_enabled = save_ftrace_enabled;
200 tracer_enabled = save_tracer_enabled;
201
202 /* Enable tracing on all functions again */
203 ftrace_set_filter(NULL, 0, 1);
204
205 return ret;
206}
207#else
208# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
209#endif /* CONFIG_DYNAMIC_FTRACE */
60a11774
SR
210/*
211 * Simple verification test of ftrace function tracer.
212 * Enable ftrace, sleep 1/10 second, and then read the trace
213 * buffer to see if all is in order.
214 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	/* save global enable state so we can restore it when done */
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		/* nothing was modified yet, so a bare return is safe here */
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* a working function tracer must have recorded something in 100ms */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* chain into the dynamic-ftrace test (no-op stub without CONFIG_DYNAMIC_FTRACE) */
	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
269#endif /* CONFIG_FTRACE */
270
271#ifdef CONFIG_IRQSOFF_TRACER
/*
 * Verify the irqsoff tracer: open a ~100us interrupts-off window while
 * tracing and check that both the live buffer and the max-latency
 * snapshot (max_tr) recorded entries.
 */
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	/* preserve the real max latency; the test clobbers it below */
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	/* count is only read when the second buffer check ran and passed */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
306#endif /* CONFIG_IRQSOFF_TRACER */
307
308#ifdef CONFIG_PREEMPT_TRACER
309int
310trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
311{
312 unsigned long save_max = tracing_max_latency;
313 unsigned long count;
314 int ret;
315
316 /* start the tracing */
317 tr->ctrl = 1;
318 trace->init(tr);
319 /* reset the max latency */
320 tracing_max_latency = 0;
321 /* disable preemption for a bit */
322 preempt_disable();
323 udelay(100);
324 preempt_enable();
325 /* stop the tracing. */
326 tr->ctrl = 0;
327 trace->ctrl_update(tr);
328 /* check both trace buffers */
329 ret = trace_test_buffer(tr, NULL);
330 if (!ret)
331 ret = trace_test_buffer(&max_tr, &count);
332 trace->reset(tr);
333
334 if (!ret && !count) {
335 printk(KERN_CONT ".. no entries found ..");
336 ret = -1;
337 }
338
339 tracing_max_latency = save_max;
340
341 return ret;
342}
343#endif /* CONFIG_PREEMPT_TRACER */
344
345#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Verify the combined preempt/irqs-off tracer with both nesting orders:
 * first preempt_disable -> irq_disable -> preempt_enable -> irq_enable,
 * then the same sequence again after restarting the tracer, checking
 * the max-latency snapshot after each window.
 */
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	/* preserve the real max latency; the test resets it below */
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	/* NOTE(review): ret is provably 0 here, so the !ret test is redundant */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
419#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
420
421#ifdef CONFIG_SCHED_TRACER
/*
 * Kthread body for the wakeup-latency selftest: promote itself to an
 * RT (SCHED_FIFO) priority, signal readiness through the completion
 * passed in via @data, then sleep until the test wakes it.  Stays
 * alive with short sleeps until kthread_stop() is called.
 */
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
448
/*
 * Verify the wakeup-latency tracer: spawn an RT kthread, let it go to
 * sleep, wake it while tracing, and check that the wakeup latency was
 * captured in the max-latency snapshot (max_tr).
 */
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	/* preserve the real max latency; the test resets it below */
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horrible
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	/* count is only read when the second buffer check ran and passed */
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
515#endif /* CONFIG_SCHED_TRACER */
516
517#ifdef CONFIG_CONTEXT_SWITCH_TRACER
518int
519trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
520{
521 unsigned long count;
522 int ret;
523
524 /* start the tracing */
525 tr->ctrl = 1;
526 trace->init(tr);
527 /* Sleep for a 1/10 of a second */
528 msleep(100);
529 /* stop the tracing. */
530 tr->ctrl = 0;
531 trace->ctrl_update(tr);
532 /* check the trace buffer */
533 ret = trace_test_buffer(tr, &count);
534 trace->reset(tr);
535
536 if (!ret && !count) {
537 printk(KERN_CONT ".. no entries found ..");
538 ret = -1;
539 }
540
541 return ret;
542}
543#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
a6dd24f8
IM
544
545#ifdef CONFIG_SYSPROF_TRACER
546int
547trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
548{
549 unsigned long count;
550 int ret;
551
552 /* start the tracing */
553 tr->ctrl = 1;
554 trace->init(tr);
555 /* Sleep for a 1/10 of a second */
556 msleep(100);
557 /* stop the tracing. */
558 tr->ctrl = 0;
559 trace->ctrl_update(tr);
560 /* check the trace buffer */
561 ret = trace_test_buffer(tr, &count);
562 trace->reset(tr);
563
a6dd24f8
IM
564 return ret;
565}
566#endif /* CONFIG_SYSPROF_TRACER */
This page took 0.194602 seconds and 5 git commands to generate.