tracing/ftrace: factorize the tracing files creation
kernel/trace/ftrace.c
1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/kprobes.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31
32 #include <trace/sched.h>
33
34 #include <asm/ftrace.h>
35
36 #include "trace_output.h"
37 #include "trace_stat.h"
38
39 #define FTRACE_WARN_ON(cond) \
40 do { \
41 if (WARN_ON(cond)) \
42 ftrace_kill(); \
43 } while (0)
44
45 #define FTRACE_WARN_ON_ONCE(cond) \
46 do { \
47 if (WARN_ON_ONCE(cond)) \
48 ftrace_kill(); \
49 } while (0)
50
51 /* hash bits for specific function selection */
52 #define FTRACE_HASH_BITS 7
53 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
54
55 /* ftrace_enabled is used to turn ftrace on or off */
56 int ftrace_enabled __read_mostly;
57 static int last_ftrace_enabled;
58
59 /* Quick disabling of function tracer. */
60 int function_trace_stop;
61
62 /*
63 * ftrace_disabled is set when an anomaly is discovered.
64 * ftrace_disabled is much stronger than ftrace_enabled.
65 */
66 static int ftrace_disabled __read_mostly;
67
68 static DEFINE_MUTEX(ftrace_lock);
69
70 static struct ftrace_ops ftrace_list_end __read_mostly =
71 {
72 .func = ftrace_stub,
73 };
74
75 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
76 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
77 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
78 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
79
80 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
81 {
82 struct ftrace_ops *op = ftrace_list;
83
84 /* in case someone actually ports this to alpha! */
85 read_barrier_depends();
86
87 while (op != &ftrace_list_end) {
88 /* silly alpha */
89 read_barrier_depends();
90 op->func(ip, parent_ip);
91 op = op->next;
92 }
93 }
94
95 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
96 {
97 if (!test_tsk_trace_trace(current))
98 return;
99
100 ftrace_pid_function(ip, parent_ip);
101 }
102
103 static void set_ftrace_pid_function(ftrace_func_t func)
104 {
105 /* do not set ftrace_pid_function to itself! */
106 if (func != ftrace_pid_func)
107 ftrace_pid_function = func;
108 }
109
110 /**
111 * clear_ftrace_function - reset the ftrace function
112 *
113 * This NULLs the ftrace function and in essence stops
114 * tracing. There may be a lag before tracing actually stops.
115 */
116 void clear_ftrace_function(void)
117 {
118 ftrace_trace_function = ftrace_stub;
119 __ftrace_trace_function = ftrace_stub;
120 ftrace_pid_function = ftrace_stub;
121 }
122
123 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
124 /*
125 * For those archs that do not test function_trace_stop in their
126 * mcount call site, we need to do it from C.
127 */
128 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
129 {
130 if (function_trace_stop)
131 return;
132
133 __ftrace_trace_function(ip, parent_ip);
134 }
135 #endif
136
137 static int __register_ftrace_function(struct ftrace_ops *ops)
138 {
139 ops->next = ftrace_list;
140 /*
141 * We are entering ops into the ftrace_list but another
142 * CPU might be walking that list. We need to make sure
143 * the ops->next pointer is valid before another CPU sees
144 * the ops pointer included into the ftrace_list.
145 */
146 smp_wmb();
147 ftrace_list = ops;
148
149 if (ftrace_enabled) {
150 ftrace_func_t func;
151
152 if (ops->next == &ftrace_list_end)
153 func = ops->func;
154 else
155 func = ftrace_list_func;
156
157 if (ftrace_pid_trace) {
158 set_ftrace_pid_function(func);
159 func = ftrace_pid_func;
160 }
161
162 /*
163 * For one func, simply call it directly.
164 * For more than one func, call the chain.
165 */
166 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
167 ftrace_trace_function = func;
168 #else
169 __ftrace_trace_function = func;
170 ftrace_trace_function = ftrace_test_stop_func;
171 #endif
172 }
173
174 return 0;
175 }
176
177 static int __unregister_ftrace_function(struct ftrace_ops *ops)
178 {
179 struct ftrace_ops **p;
180
181 /*
182 * If we are removing the last function, then simply point
183 * to the ftrace_stub.
184 */
185 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
186 ftrace_trace_function = ftrace_stub;
187 ftrace_list = &ftrace_list_end;
188 return 0;
189 }
190
191 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
192 if (*p == ops)
193 break;
194
195 if (*p != ops)
196 return -1;
197
198 *p = (*p)->next;
199
200 if (ftrace_enabled) {
201 /* If we only have one func left, then call that directly */
202 if (ftrace_list->next == &ftrace_list_end) {
203 ftrace_func_t func = ftrace_list->func;
204
205 if (ftrace_pid_trace) {
206 set_ftrace_pid_function(func);
207 func = ftrace_pid_func;
208 }
209 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
210 ftrace_trace_function = func;
211 #else
212 __ftrace_trace_function = func;
213 #endif
214 }
215 }
216
217 return 0;
218 }
219
220 static void ftrace_update_pid_func(void)
221 {
222 ftrace_func_t func;
223
224 if (ftrace_trace_function == ftrace_stub)
225 return;
226
227 func = ftrace_trace_function;
228
229 if (ftrace_pid_trace) {
230 set_ftrace_pid_function(func);
231 func = ftrace_pid_func;
232 } else {
233 if (func == ftrace_pid_func)
234 func = ftrace_pid_function;
235 }
236
237 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
238 ftrace_trace_function = func;
239 #else
240 __ftrace_trace_function = func;
241 #endif
242 }
243
244 #ifdef CONFIG_FUNCTION_PROFILER
245 struct ftrace_profile {
246 struct hlist_node node;
247 unsigned long ip;
248 unsigned long counter;
249 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
250 unsigned long long time;
251 #endif
252 };
253
254 struct ftrace_profile_page {
255 struct ftrace_profile_page *next;
256 unsigned long index;
257 struct ftrace_profile records[];
258 };
259
260 struct ftrace_profile_stat {
261 atomic_t disabled;
262 struct hlist_head *hash;
263 struct ftrace_profile_page *pages;
264 struct ftrace_profile_page *start;
265 struct tracer_stat stat;
266 };
267
268 #define PROFILE_RECORDS_SIZE \
269 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
270
271 #define PROFILES_PER_PAGE \
272 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
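/*
 * For a rough feel (illustrative only, assuming a 64-bit box with 4K
 * pages): struct ftrace_profile is 32 bytes with the graph tracer off
 * or 40 bytes with it on, and the page header is 16 bytes, so each
 * page holds about (4096 - 16) / 32 = 127 or (4096 - 16) / 40 = 102
 * records.
 */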
273
274 static int ftrace_profile_bits __read_mostly;
275 static int ftrace_profile_enabled __read_mostly;
276
277 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
278 static DEFINE_MUTEX(ftrace_profile_lock);
279
280 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
281
282 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
283
284 static void *
285 function_stat_next(void *v, int idx)
286 {
287 struct ftrace_profile *rec = v;
288 struct ftrace_profile_page *pg;
289
290 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
291
292 again:
293 if (idx != 0) rec++; /* keep the record handed in by function_stat_start() */
294 if ((void *)rec >= (void *)&pg->records[pg->index]) {
295 pg = pg->next;
296 if (!pg)
297 return NULL;
298 rec = &pg->records[0];
299 if (!rec->counter)
300 goto again;
301 }
302
303 return rec;
304 }
305
306 static void *function_stat_start(struct tracer_stat *trace)
307 {
308 struct ftrace_profile_stat *stat =
309 container_of(trace, struct ftrace_profile_stat, stat);
310
311 if (!stat || !stat->start)
312 return NULL;
313
314 return function_stat_next(&stat->start->records[0], 0);
315 }
316
317 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
318 /* function graph compares on total time */
319 static int function_stat_cmp(void *p1, void *p2)
320 {
321 struct ftrace_profile *a = p1;
322 struct ftrace_profile *b = p2;
323
324 if (a->time < b->time)
325 return -1;
326 if (a->time > b->time)
327 return 1;
328 else
329 return 0;
330 }
331 #else
332 /* without function graph, compare against hit counts */
333 static int function_stat_cmp(void *p1, void *p2)
334 {
335 struct ftrace_profile *a = p1;
336 struct ftrace_profile *b = p2;
337
338 if (a->counter < b->counter)
339 return -1;
340 if (a->counter > b->counter)
341 return 1;
342 else
343 return 0;
344 }
345 #endif
346
347 static int function_stat_headers(struct seq_file *m)
348 {
349 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
350 seq_printf(m, " Function "
351 "Hit Time Avg\n"
352 " -------- "
353 "--- ---- ---\n");
354 #else
355 seq_printf(m, " Function Hit\n"
356 " -------- ---\n");
357 #endif
358 return 0;
359 }
360
361 static int function_stat_show(struct seq_file *m, void *v)
362 {
363 struct ftrace_profile *rec = v;
364 char str[KSYM_SYMBOL_LEN];
365 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
366 static DEFINE_MUTEX(mutex);
367 static struct trace_seq s;
368 unsigned long long avg;
369 #endif
370
371 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
372 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
373
374 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
375 seq_printf(m, " ");
376 avg = rec->time;
377 do_div(avg, rec->counter);
378
379 mutex_lock(&mutex);
380 trace_seq_init(&s);
381 trace_print_graph_duration(rec->time, &s);
382 trace_seq_puts(&s, " ");
383 trace_print_graph_duration(avg, &s);
384 trace_print_seq(m, &s);
385 mutex_unlock(&mutex);
386 #endif
387 seq_putc(m, '\n');
388
389 return 0;
390 }
391
392 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
393 {
394 struct ftrace_profile_page *pg;
395
396 pg = stat->pages = stat->start;
397
398 while (pg) {
399 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
400 pg->index = 0;
401 pg = pg->next;
402 }
403
404 memset(stat->hash, 0,
405 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
406 }
407
408 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
409 {
410 struct ftrace_profile_page *pg;
411 int functions;
412 int pages;
413 int i;
414
415 /* If we already allocated, do nothing */
416 if (stat->pages)
417 return 0;
418
419 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
420 if (!stat->pages)
421 return -ENOMEM;
422
423 #ifdef CONFIG_DYNAMIC_FTRACE
424 functions = ftrace_update_tot_cnt;
425 #else
426 /*
427 * We do not know the number of functions that exist because
428 * dynamic tracing is what counts them. From past experience
429 * we have around 20K functions. That should be more than enough.
430 * It is highly unlikely we will execute every function in
431 * the kernel.
432 */
433 functions = 20000;
434 #endif
435
436 pg = stat->start = stat->pages;
437
438 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
439
440 for (i = 0; i < pages; i++) {
441 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
442 if (!pg->next)
443 goto out_free;
444 pg = pg->next;
445 }
446
447 return 0;
448
449 out_free:
450 pg = stat->start;
451 while (pg) {
452 unsigned long tmp = (unsigned long)pg;
453
454 pg = pg->next;
455 free_page(tmp);
456 }
457
458 free_page((unsigned long)stat->pages);
459 stat->pages = NULL;
460 stat->start = NULL;
461
462 return -ENOMEM;
463 }
464
465 static int ftrace_profile_init_cpu(int cpu)
466 {
467 struct ftrace_profile_stat *stat;
468 int size;
469
470 stat = &per_cpu(ftrace_profile_stats, cpu);
471
472 if (stat->hash) {
473 /* If the profile is already created, simply reset it */
474 ftrace_profile_reset(stat);
475 return 0;
476 }
477
478 /*
479 * We are profiling all functions, but usually only a few thousand
480 * functions are hit. We'll make a hash of 1024 items.
481 */
482 size = FTRACE_PROFILE_HASH_SIZE;
483
484 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
485
486 if (!stat->hash)
487 return -ENOMEM;
488
489 if (!ftrace_profile_bits) {
490 size--;
491
492 for (; size; size >>= 1)
493 ftrace_profile_bits++;
494 }
495
496 /* Preallocate the function profiling pages */
497 if (ftrace_profile_pages_init(stat) < 0) {
498 kfree(stat->hash);
499 stat->hash = NULL;
500 return -ENOMEM;
501 }
502
503 return 0;
504 }
505
506 static int ftrace_profile_init(void)
507 {
508 int cpu;
509 int ret = 0;
510
511 for_each_online_cpu(cpu) {
512 ret = ftrace_profile_init_cpu(cpu);
513 if (ret)
514 break;
515 }
516
517 return ret;
518 }
519
520 /* interrupts must be disabled */
521 static struct ftrace_profile *
522 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
523 {
524 struct ftrace_profile *rec;
525 struct hlist_head *hhd;
526 struct hlist_node *n;
527 unsigned long key;
528
529 key = hash_long(ip, ftrace_profile_bits);
530 hhd = &stat->hash[key];
531
532 if (hlist_empty(hhd))
533 return NULL;
534
535 hlist_for_each_entry_rcu(rec, n, hhd, node) {
536 if (rec->ip == ip)
537 return rec;
538 }
539
540 return NULL;
541 }
542
543 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
544 struct ftrace_profile *rec)
545 {
546 unsigned long key;
547
548 key = hash_long(rec->ip, ftrace_profile_bits);
549 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
550 }
551
552 /*
553 * The memory is already allocated; this simply finds a new record to use.
554 */
555 static struct ftrace_profile *
556 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
557 {
558 struct ftrace_profile *rec = NULL;
559
560 /* prevent recursion (from NMIs) */
561 if (atomic_inc_return(&stat->disabled) != 1)
562 goto out;
563
564 /*
565 * Try to find the function again since an NMI
566 * could have added it
567 */
568 rec = ftrace_find_profiled_func(stat, ip);
569 if (rec)
570 goto out;
571
572 if (stat->pages->index == PROFILES_PER_PAGE) {
573 if (!stat->pages->next)
574 goto out;
575 stat->pages = stat->pages->next;
576 }
577
578 rec = &stat->pages->records[stat->pages->index++];
579 rec->ip = ip;
580 ftrace_add_profile(stat, rec);
581
582 out:
583 atomic_dec(&stat->disabled);
584
585 return rec;
586 }
587
588 static void
589 function_profile_call(unsigned long ip, unsigned long parent_ip)
590 {
591 struct ftrace_profile_stat *stat;
592 struct ftrace_profile *rec;
593 unsigned long flags;
594
595 if (!ftrace_profile_enabled)
596 return;
597
598 local_irq_save(flags);
599
600 stat = &__get_cpu_var(ftrace_profile_stats);
601 if (!stat->hash)
602 goto out;
603
604 rec = ftrace_find_profiled_func(stat, ip);
605 if (!rec) {
606 rec = ftrace_profile_alloc(stat, ip);
607 if (!rec)
608 goto out;
609 }
610
611 rec->counter++;
612 out:
613 local_irq_restore(flags);
614 }
615
616 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
617 static int profile_graph_entry(struct ftrace_graph_ent *trace)
618 {
619 function_profile_call(trace->func, 0);
620 return 1;
621 }
622
623 static void profile_graph_return(struct ftrace_graph_ret *trace)
624 {
625 struct ftrace_profile_stat *stat;
626 unsigned long long calltime;
627 struct ftrace_profile *rec;
628 unsigned long flags;
629
630 local_irq_save(flags);
631 stat = &__get_cpu_var(ftrace_profile_stats);
632 if (!stat->hash)
633 goto out;
634
635 calltime = trace->rettime - trace->calltime;
636
637 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
638 int index;
639
640 index = trace->depth;
641
642 /* Append this call time to the parent time to subtract */
643 if (index)
644 current->ret_stack[index - 1].subtime += calltime;
645
646 if (current->ret_stack[index].subtime < calltime)
647 calltime -= current->ret_stack[index].subtime;
648 else
649 calltime = 0;
650 }
651
652 rec = ftrace_find_profiled_func(stat, trace->func);
653 if (rec)
654 rec->time += calltime;
655
656 out:
657 local_irq_restore(flags);
658 }
659
660 static int register_ftrace_profiler(void)
661 {
662 return register_ftrace_graph(&profile_graph_return,
663 &profile_graph_entry);
664 }
665
666 static void unregister_ftrace_profiler(void)
667 {
668 unregister_ftrace_graph();
669 }
670 #else
671 static struct ftrace_ops ftrace_profile_ops __read_mostly =
672 {
673 .func = function_profile_call,
674 };
675
676 static int register_ftrace_profiler(void)
677 {
678 return register_ftrace_function(&ftrace_profile_ops);
679 }
680
681 static void unregister_ftrace_profiler(void)
682 {
683 unregister_ftrace_function(&ftrace_profile_ops);
684 }
685 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
686
687 static ssize_t
688 ftrace_profile_write(struct file *filp, const char __user *ubuf,
689 size_t cnt, loff_t *ppos)
690 {
691 unsigned long val;
692 char buf[64]; /* big enough to hold a number */
693 int ret;
694
695 if (cnt >= sizeof(buf))
696 return -EINVAL;
697
698 if (copy_from_user(&buf, ubuf, cnt))
699 return -EFAULT;
700
701 buf[cnt] = 0;
702
703 ret = strict_strtoul(buf, 10, &val);
704 if (ret < 0)
705 return ret;
706
707 val = !!val;
708
709 mutex_lock(&ftrace_profile_lock);
710 if (ftrace_profile_enabled ^ val) {
711 if (val) {
712 ret = ftrace_profile_init();
713 if (ret < 0) {
714 cnt = ret;
715 goto out;
716 }
717
718 ret = register_ftrace_profiler();
719 if (ret < 0) {
720 cnt = ret;
721 goto out;
722 }
723 ftrace_profile_enabled = 1;
724 } else {
725 ftrace_profile_enabled = 0;
726 unregister_ftrace_profiler();
727 }
728 }
729 out:
730 mutex_unlock(&ftrace_profile_lock);
731
732 *ppos += cnt;
733
734 return cnt;
735 }
736
737 static ssize_t
738 ftrace_profile_read(struct file *filp, char __user *ubuf,
739 size_t cnt, loff_t *ppos)
740 {
741 char buf[64]; /* big enough to hold a number */
742 int r;
743
744 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
745 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
746 }
747
748 static const struct file_operations ftrace_profile_fops = {
749 .open = tracing_open_generic,
750 .read = ftrace_profile_read,
751 .write = ftrace_profile_write,
752 };
753
754 /* used to initialize the real stat files */
755 static struct tracer_stat function_stats __initdata = {
756 .name = "functions",
757 .stat_start = function_stat_start,
758 .stat_next = function_stat_next,
759 .stat_cmp = function_stat_cmp,
760 .stat_headers = function_stat_headers,
761 .stat_show = function_stat_show
762 };
763
764 static void ftrace_profile_debugfs(struct dentry *d_tracer)
765 {
766 struct ftrace_profile_stat *stat;
767 struct dentry *entry;
768 char *name;
769 int ret;
770 int cpu;
771
772 for_each_possible_cpu(cpu) {
773 stat = &per_cpu(ftrace_profile_stats, cpu);
774
775 /* allocate enough for function name + cpu number */
776 name = kmalloc(32, GFP_KERNEL);
777 if (!name) {
778 /*
779 * The files created are permanent; if something goes wrong
780 * we still do not free memory. Note that stat points into a
781 * per-cpu array, so it must never be passed to kfree().
782 */
783 WARN(1,
784 "Could not allocate stat file for cpu %d\n",
785 cpu);
786 return;
787 }
788 stat->stat = function_stats;
789 snprintf(name, 32, "function%d", cpu);
790 stat->stat.name = name;
791 ret = register_stat_tracer(&stat->stat);
792 if (ret) {
793 WARN(1,
794 "Could not register function stat for cpu %d\n",
795 cpu);
796 kfree(name);
797 return;
798 }
799 }
800
801 entry = debugfs_create_file("function_profile_enabled", 0644,
802 d_tracer, NULL, &ftrace_profile_fops);
803 if (!entry)
804 pr_warning("Could not create debugfs "
805 "'function_profile_enabled' entry\n");
806 }
807
808 #else /* CONFIG_FUNCTION_PROFILER */
809 static void ftrace_profile_debugfs(struct dentry *d_tracer)
810 {
811 }
812 #endif /* CONFIG_FUNCTION_PROFILER */
813
814 /* set when tracing only a pid */
815 struct pid *ftrace_pid_trace;
816 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
817
818 #ifdef CONFIG_DYNAMIC_FTRACE
819
820 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
821 # error Dynamic ftrace depends on MCOUNT_RECORD
822 #endif
823
824 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
825
826 struct ftrace_func_probe {
827 struct hlist_node node;
828 struct ftrace_probe_ops *ops;
829 unsigned long flags;
830 unsigned long ip;
831 void *data;
832 struct rcu_head rcu;
833 };
834
835 enum {
836 FTRACE_ENABLE_CALLS = (1 << 0),
837 FTRACE_DISABLE_CALLS = (1 << 1),
838 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
839 FTRACE_ENABLE_MCOUNT = (1 << 3),
840 FTRACE_DISABLE_MCOUNT = (1 << 4),
841 FTRACE_START_FUNC_RET = (1 << 5),
842 FTRACE_STOP_FUNC_RET = (1 << 6),
843 };
844
845 static int ftrace_filtered;
846
847 static struct dyn_ftrace *ftrace_new_addrs;
848
849 static DEFINE_MUTEX(ftrace_regex_lock);
850
851 struct ftrace_page {
852 struct ftrace_page *next;
853 int index;
854 struct dyn_ftrace records[];
855 };
856
857 #define ENTRIES_PER_PAGE \
858 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
859
860 /* estimate from running different kernels */
861 #define NR_TO_INIT 10000
862
863 static struct ftrace_page *ftrace_pages_start;
864 static struct ftrace_page *ftrace_pages;
865
866 static struct dyn_ftrace *ftrace_free_records;
867
868 /*
869 * This is a double for loop. Do not use 'break' to break out of
870 * the loop; you must use a goto.
871 */
872 #define do_for_each_ftrace_rec(pg, rec) \
873 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
874 int _____i; \
875 for (_____i = 0; _____i < pg->index; _____i++) { \
876 rec = &pg->records[_____i];
877
878 #define while_for_each_ftrace_rec() \
879 } \
880 }
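/*
 * For illustration, the usual pairing of the two macros above
 * (mirroring their use in ftrace_replace_code() below):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;
 *		...
 *	} while_for_each_ftrace_rec();
 */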
881
882 #ifdef CONFIG_KPROBES
883
884 static int frozen_record_count;
885
886 static inline void freeze_record(struct dyn_ftrace *rec)
887 {
888 if (!(rec->flags & FTRACE_FL_FROZEN)) {
889 rec->flags |= FTRACE_FL_FROZEN;
890 frozen_record_count++;
891 }
892 }
893
894 static inline void unfreeze_record(struct dyn_ftrace *rec)
895 {
896 if (rec->flags & FTRACE_FL_FROZEN) {
897 rec->flags &= ~FTRACE_FL_FROZEN;
898 frozen_record_count--;
899 }
900 }
901
902 static inline int record_frozen(struct dyn_ftrace *rec)
903 {
904 return rec->flags & FTRACE_FL_FROZEN;
905 }
906 #else
907 # define freeze_record(rec) ({ 0; })
908 # define unfreeze_record(rec) ({ 0; })
909 # define record_frozen(rec) ({ 0; })
910 #endif /* CONFIG_KPROBES */
911
912 static void ftrace_free_rec(struct dyn_ftrace *rec)
913 {
914 rec->freelist = ftrace_free_records;
915 ftrace_free_records = rec;
916 rec->flags |= FTRACE_FL_FREE;
917 }
918
919 void ftrace_release(void *start, unsigned long size)
920 {
921 struct dyn_ftrace *rec;
922 struct ftrace_page *pg;
923 unsigned long s = (unsigned long)start;
924 unsigned long e = s + size;
925
926 if (ftrace_disabled || !start)
927 return;
928
929 mutex_lock(&ftrace_lock);
930 do_for_each_ftrace_rec(pg, rec) {
931 if ((rec->ip >= s) && (rec->ip < e)) {
932 /*
933 * rec->ip is changed in ftrace_free_rec();
934 * it should not be between s and e if the record was freed.
935 */
936 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
937 ftrace_free_rec(rec);
938 }
939 } while_for_each_ftrace_rec();
940 mutex_unlock(&ftrace_lock);
941 }
942
943 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
944 {
945 struct dyn_ftrace *rec;
946
947 /* First check for freed records */
948 if (ftrace_free_records) {
949 rec = ftrace_free_records;
950
951 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
952 FTRACE_WARN_ON_ONCE(1);
953 ftrace_free_records = NULL;
954 return NULL;
955 }
956
957 ftrace_free_records = rec->freelist;
958 memset(rec, 0, sizeof(*rec));
959 return rec;
960 }
961
962 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
963 if (!ftrace_pages->next) {
964 /* allocate another page */
965 ftrace_pages->next =
966 (void *)get_zeroed_page(GFP_KERNEL);
967 if (!ftrace_pages->next)
968 return NULL;
969 }
970 ftrace_pages = ftrace_pages->next;
971 }
972
973 return &ftrace_pages->records[ftrace_pages->index++];
974 }
975
976 static struct dyn_ftrace *
977 ftrace_record_ip(unsigned long ip)
978 {
979 struct dyn_ftrace *rec;
980
981 if (ftrace_disabled)
982 return NULL;
983
984 rec = ftrace_alloc_dyn_node(ip);
985 if (!rec)
986 return NULL;
987
988 rec->ip = ip;
989 rec->newlist = ftrace_new_addrs;
990 ftrace_new_addrs = rec;
991
992 return rec;
993 }
994
995 static void print_ip_ins(const char *fmt, unsigned char *p)
996 {
997 int i;
998
999 printk(KERN_CONT "%s", fmt);
1000
1001 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1002 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1003 }
1004
1005 static void ftrace_bug(int failed, unsigned long ip)
1006 {
1007 switch (failed) {
1008 case -EFAULT:
1009 FTRACE_WARN_ON_ONCE(1);
1010 pr_info("ftrace faulted on modifying ");
1011 print_ip_sym(ip);
1012 break;
1013 case -EINVAL:
1014 FTRACE_WARN_ON_ONCE(1);
1015 pr_info("ftrace failed to modify ");
1016 print_ip_sym(ip);
1017 print_ip_ins(" actual: ", (unsigned char *)ip);
1018 printk(KERN_CONT "\n");
1019 break;
1020 case -EPERM:
1021 FTRACE_WARN_ON_ONCE(1);
1022 pr_info("ftrace faulted on writing ");
1023 print_ip_sym(ip);
1024 break;
1025 default:
1026 FTRACE_WARN_ON_ONCE(1);
1027 pr_info("ftrace faulted on unknown error ");
1028 print_ip_sym(ip);
1029 }
1030 }
1031
1032
1033 static int
1034 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1035 {
1036 unsigned long ftrace_addr;
1037 unsigned long ip, fl;
1038
1039 ftrace_addr = (unsigned long)FTRACE_ADDR;
1040
1041 ip = rec->ip;
1042
1043 /*
1044 * If this record is not to be traced and
1045 * it is not enabled then do nothing.
1046 *
1047 * If this record is not to be traced and
1048 * it is enabled then disable it.
1049 *
1050 */
1051 if (rec->flags & FTRACE_FL_NOTRACE) {
1052 if (rec->flags & FTRACE_FL_ENABLED)
1053 rec->flags &= ~FTRACE_FL_ENABLED;
1054 else
1055 return 0;
1056
1057 } else if (ftrace_filtered && enable) {
1058 /*
1059 * Filtering is on:
1060 */
1061
1062 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
1063
1064 /* Record is filtered and enabled, do nothing */
1065 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
1066 return 0;
1067
1068 /* Record is not filtered or enabled, do nothing */
1069 if (!fl)
1070 return 0;
1071
1072 /* Record is not filtered but enabled, disable it */
1073 if (fl == FTRACE_FL_ENABLED)
1074 rec->flags &= ~FTRACE_FL_ENABLED;
1075 else
1076 /* Otherwise record is filtered but not enabled, enable it */
1077 rec->flags |= FTRACE_FL_ENABLED;
1078 } else {
1079 /* Disable or not filtered */
1080
1081 if (enable) {
1082 /* if record is enabled, do nothing */
1083 if (rec->flags & FTRACE_FL_ENABLED)
1084 return 0;
1085
1086 rec->flags |= FTRACE_FL_ENABLED;
1087
1088 } else {
1089
1090 /* if record is not enabled, do nothing */
1091 if (!(rec->flags & FTRACE_FL_ENABLED))
1092 return 0;
1093
1094 rec->flags &= ~FTRACE_FL_ENABLED;
1095 }
1096 }
1097
1098 if (rec->flags & FTRACE_FL_ENABLED)
1099 return ftrace_make_call(rec, ftrace_addr);
1100 else
1101 return ftrace_make_nop(NULL, rec, ftrace_addr);
1102 }
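/*
 * Summary of the decision above when filtering is on, enable is set
 * and FTRACE_FL_NOTRACE is clear:
 *
 *	FILTER	ENABLED		action
 *	  0	  0		nothing
 *	  0	  1		disable
 *	  1	  0		enable
 *	  1	  1		nothing
 */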
1103
1104 static void ftrace_replace_code(int enable)
1105 {
1106 struct dyn_ftrace *rec;
1107 struct ftrace_page *pg;
1108 int failed;
1109
1110 do_for_each_ftrace_rec(pg, rec) {
1111 /*
1112 * Skip over free records, records that have failed,
1113 * and records that have not been converted.
1114 */
1115 if (rec->flags & FTRACE_FL_FREE ||
1116 rec->flags & FTRACE_FL_FAILED ||
1117 !(rec->flags & FTRACE_FL_CONVERTED))
1118 continue;
1119
1120 /* ignore updates to this record's mcount site */
1121 if (get_kprobe((void *)rec->ip)) {
1122 freeze_record(rec);
1123 continue;
1124 } else {
1125 unfreeze_record(rec);
1126 }
1127
1128 failed = __ftrace_replace_code(rec, enable);
1129 if (failed) {
1130 rec->flags |= FTRACE_FL_FAILED;
1131 if ((system_state == SYSTEM_BOOTING) ||
1132 !core_kernel_text(rec->ip)) {
1133 ftrace_free_rec(rec);
1134 } else {
1135 ftrace_bug(failed, rec->ip);
1136 /* Stop processing */
1137 return;
1138 }
1139 }
1140 } while_for_each_ftrace_rec();
1141 }
1142
1143 static int
1144 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1145 {
1146 unsigned long ip;
1147 int ret;
1148
1149 ip = rec->ip;
1150
1151 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1152 if (ret) {
1153 ftrace_bug(ret, ip);
1154 rec->flags |= FTRACE_FL_FAILED;
1155 return 0;
1156 }
1157 return 1;
1158 }
1159
1160 /*
1161 * archs can override this function if they must do something
1162 * before the code modification takes place.
1163 */
1164 int __weak ftrace_arch_code_modify_prepare(void)
1165 {
1166 return 0;
1167 }
1168
1169 /*
1170 * archs can override this function if they must do something
1171 * after the code modification takes place.
1172 */
1173 int __weak ftrace_arch_code_modify_post_process(void)
1174 {
1175 return 0;
1176 }
1177
1178 static int __ftrace_modify_code(void *data)
1179 {
1180 int *command = data;
1181
1182 if (*command & FTRACE_ENABLE_CALLS)
1183 ftrace_replace_code(1);
1184 else if (*command & FTRACE_DISABLE_CALLS)
1185 ftrace_replace_code(0);
1186
1187 if (*command & FTRACE_UPDATE_TRACE_FUNC)
1188 ftrace_update_ftrace_func(ftrace_trace_function);
1189
1190 if (*command & FTRACE_START_FUNC_RET)
1191 ftrace_enable_ftrace_graph_caller();
1192 else if (*command & FTRACE_STOP_FUNC_RET)
1193 ftrace_disable_ftrace_graph_caller();
1194
1195 return 0;
1196 }
1197
1198 static void ftrace_run_update_code(int command)
1199 {
1200 int ret;
1201
1202 ret = ftrace_arch_code_modify_prepare();
1203 FTRACE_WARN_ON(ret);
1204 if (ret)
1205 return;
1206
1207 stop_machine(__ftrace_modify_code, &command, NULL);
1208
1209 ret = ftrace_arch_code_modify_post_process();
1210 FTRACE_WARN_ON(ret);
1211 }
1212
1213 static ftrace_func_t saved_ftrace_func;
1214 static int ftrace_start_up;
1215
1216 static void ftrace_startup_enable(int command)
1217 {
1218 if (saved_ftrace_func != ftrace_trace_function) {
1219 saved_ftrace_func = ftrace_trace_function;
1220 command |= FTRACE_UPDATE_TRACE_FUNC;
1221 }
1222
1223 if (!command || !ftrace_enabled)
1224 return;
1225
1226 ftrace_run_update_code(command);
1227 }
1228
1229 static void ftrace_startup(int command)
1230 {
1231 if (unlikely(ftrace_disabled))
1232 return;
1233
1234 ftrace_start_up++;
1235 command |= FTRACE_ENABLE_CALLS;
1236
1237 ftrace_startup_enable(command);
1238 }
1239
1240 static void ftrace_shutdown(int command)
1241 {
1242 if (unlikely(ftrace_disabled))
1243 return;
1244
1245 ftrace_start_up--;
1246 if (!ftrace_start_up)
1247 command |= FTRACE_DISABLE_CALLS;
1248
1249 if (saved_ftrace_func != ftrace_trace_function) {
1250 saved_ftrace_func = ftrace_trace_function;
1251 command |= FTRACE_UPDATE_TRACE_FUNC;
1252 }
1253
1254 if (!command || !ftrace_enabled)
1255 return;
1256
1257 ftrace_run_update_code(command);
1258 }
1259
1260 static void ftrace_startup_sysctl(void)
1261 {
1262 int command = FTRACE_ENABLE_MCOUNT;
1263
1264 if (unlikely(ftrace_disabled))
1265 return;
1266
1267 /* Force update next time */
1268 saved_ftrace_func = NULL;
1269 /* ftrace_start_up is true if we want ftrace running */
1270 if (ftrace_start_up)
1271 command |= FTRACE_ENABLE_CALLS;
1272
1273 ftrace_run_update_code(command);
1274 }
1275
1276 static void ftrace_shutdown_sysctl(void)
1277 {
1278 int command = FTRACE_DISABLE_MCOUNT;
1279
1280 if (unlikely(ftrace_disabled))
1281 return;
1282
1283 /* ftrace_start_up is true if ftrace is running */
1284 if (ftrace_start_up)
1285 command |= FTRACE_DISABLE_CALLS;
1286
1287 ftrace_run_update_code(command);
1288 }
1289
1290 static cycle_t ftrace_update_time;
1291 static unsigned long ftrace_update_cnt;
1292 unsigned long ftrace_update_tot_cnt;
1293
1294 static int ftrace_update_code(struct module *mod)
1295 {
1296 struct dyn_ftrace *p;
1297 cycle_t start, stop;
1298
1299 start = ftrace_now(raw_smp_processor_id());
1300 ftrace_update_cnt = 0;
1301
1302 while (ftrace_new_addrs) {
1303
1304 /* If something went wrong, bail without enabling anything */
1305 if (unlikely(ftrace_disabled))
1306 return -1;
1307
1308 p = ftrace_new_addrs;
1309 ftrace_new_addrs = p->newlist;
1310 p->flags = 0L;
1311
1312 /* convert record (i.e., patch mcount-call with NOP) */
1313 if (ftrace_code_disable(mod, p)) {
1314 p->flags |= FTRACE_FL_CONVERTED;
1315 ftrace_update_cnt++;
1316 } else
1317 ftrace_free_rec(p);
1318 }
1319
1320 stop = ftrace_now(raw_smp_processor_id());
1321 ftrace_update_time = stop - start;
1322 ftrace_update_tot_cnt += ftrace_update_cnt;
1323
1324 return 0;
1325 }
1326
1327 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1328 {
1329 struct ftrace_page *pg;
1330 int cnt;
1331 int i;
1332
1333 /* allocate a few pages */
1334 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1335 if (!ftrace_pages_start)
1336 return -1;
1337
1338 /*
1339 * Allocate a few more pages.
1340 *
1341 * TODO: have some parser search vmlinux before
1342 * final linking to find all calls to ftrace.
1343 * Then we can:
1344 * a) know how many pages to allocate.
1345 * and/or
1346 * b) set up the table then.
1347 *
1348 * The dynamic code is still necessary for
1349 * modules.
1350 */
1351
1352 pg = ftrace_pages = ftrace_pages_start;
1353
1354 cnt = num_to_init / ENTRIES_PER_PAGE;
1355 pr_info("ftrace: allocating %ld entries in %d pages\n",
1356 num_to_init, cnt + 1);
1357
1358 for (i = 0; i < cnt; i++) {
1359 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1360
1361 /* If we fail, we'll try later anyway */
1362 if (!pg->next)
1363 break;
1364
1365 pg = pg->next;
1366 }
1367
1368 return 0;
1369 }
1370
1371 enum {
1372 FTRACE_ITER_FILTER = (1 << 0),
1373 FTRACE_ITER_CONT = (1 << 1),
1374 FTRACE_ITER_NOTRACE = (1 << 2),
1375 FTRACE_ITER_FAILURES = (1 << 3),
1376 FTRACE_ITER_PRINTALL = (1 << 4),
1377 FTRACE_ITER_HASH = (1 << 5),
1378 };
1379
1380 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1381
1382 struct ftrace_iterator {
1383 struct ftrace_page *pg;
1384 int hidx;
1385 int idx;
1386 unsigned flags;
1387 unsigned char buffer[FTRACE_BUFF_MAX+1];
1388 unsigned buffer_idx;
1389 unsigned filtered;
1390 };
1391
1392 static void *
1393 t_hash_next(struct seq_file *m, void *v, loff_t *pos)
1394 {
1395 struct ftrace_iterator *iter = m->private;
1396 struct hlist_node *hnd = v;
1397 struct hlist_head *hhd;
1398
1399 WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
1400
1401 (*pos)++;
1402
1403 retry:
1404 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1405 return NULL;
1406
1407 hhd = &ftrace_func_hash[iter->hidx];
1408
1409 if (hlist_empty(hhd)) {
1410 iter->hidx++;
1411 hnd = NULL;
1412 goto retry;
1413 }
1414
1415 if (!hnd)
1416 hnd = hhd->first;
1417 else {
1418 hnd = hnd->next;
1419 if (!hnd) {
1420 iter->hidx++;
1421 goto retry;
1422 }
1423 }
1424
1425 return hnd;
1426 }
1427
1428 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1429 {
1430 struct ftrace_iterator *iter = m->private;
1431 void *p = NULL;
1432
1433 iter->flags |= FTRACE_ITER_HASH;
1434
1435 return t_hash_next(m, p, pos);
1436 }
1437
1438 static int t_hash_show(struct seq_file *m, void *v)
1439 {
1440 struct ftrace_func_probe *rec;
1441 struct hlist_node *hnd = v;
1442 char str[KSYM_SYMBOL_LEN];
1443
1444 rec = hlist_entry(hnd, struct ftrace_func_probe, node);
1445
1446 if (rec->ops->print)
1447 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1448
1449 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1450 seq_printf(m, "%s:", str);
1451
1452 kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
1453 seq_printf(m, "%s", str);
1454
1455 if (rec->data)
1456 seq_printf(m, ":%p", rec->data);
1457 seq_putc(m, '\n');
1458
1459 return 0;
1460 }
1461
1462 static void *
1463 t_next(struct seq_file *m, void *v, loff_t *pos)
1464 {
1465 struct ftrace_iterator *iter = m->private;
1466 struct dyn_ftrace *rec = NULL;
1467
1468 if (iter->flags & FTRACE_ITER_HASH)
1469 return t_hash_next(m, v, pos);
1470
1471 (*pos)++;
1472
1473 if (iter->flags & FTRACE_ITER_PRINTALL)
1474 return NULL;
1475
1476 retry:
1477 if (iter->idx >= iter->pg->index) {
1478 if (iter->pg->next) {
1479 iter->pg = iter->pg->next;
1480 iter->idx = 0;
1481 goto retry;
1482 } else {
1483 iter->idx = -1;
1484 }
1485 } else {
1486 rec = &iter->pg->records[iter->idx++];
1487 if ((rec->flags & FTRACE_FL_FREE) ||
1488
1489 (!(iter->flags & FTRACE_ITER_FAILURES) &&
1490 (rec->flags & FTRACE_FL_FAILED)) ||
1491
1492 ((iter->flags & FTRACE_ITER_FAILURES) &&
1493 !(rec->flags & FTRACE_FL_FAILED)) ||
1494
1495 ((iter->flags & FTRACE_ITER_FILTER) &&
1496 !(rec->flags & FTRACE_FL_FILTER)) ||
1497
1498 ((iter->flags & FTRACE_ITER_NOTRACE) &&
1499 !(rec->flags & FTRACE_FL_NOTRACE))) {
1500 rec = NULL;
1501 goto retry;
1502 }
1503 }
1504
1505 return rec;
1506 }
1507
1508 static void *t_start(struct seq_file *m, loff_t *pos)
1509 {
1510 struct ftrace_iterator *iter = m->private;
1511 void *p = NULL;
1512
1513 mutex_lock(&ftrace_lock);
1514 /*
1515 * For set_ftrace_filter reading, if we have the filter
1516 * off, we can short cut and just print out that all
1517 * functions are enabled.
1518 */
1519 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
1520 if (*pos > 0)
1521 return t_hash_start(m, pos);
1522 iter->flags |= FTRACE_ITER_PRINTALL;
1523 (*pos)++;
1524 return iter;
1525 }
1526
1527 if (iter->flags & FTRACE_ITER_HASH)
1528 return t_hash_start(m, pos);
1529
1530 if (*pos > 0) {
1531 if (iter->idx < 0)
1532 return p;
1533 (*pos)--;
1534 iter->idx--;
1535 }
1536
1537 p = t_next(m, p, pos);
1538
1539 if (!p)
1540 return t_hash_start(m, pos);
1541
1542 return p;
1543 }
1544
1545 static void t_stop(struct seq_file *m, void *p)
1546 {
1547 mutex_unlock(&ftrace_lock);
1548 }
1549
1550 static int t_show(struct seq_file *m, void *v)
1551 {
1552 struct ftrace_iterator *iter = m->private;
1553 struct dyn_ftrace *rec = v;
1554 char str[KSYM_SYMBOL_LEN];
1555
1556 if (iter->flags & FTRACE_ITER_HASH)
1557 return t_hash_show(m, v);
1558
1559 if (iter->flags & FTRACE_ITER_PRINTALL) {
1560 seq_printf(m, "#### all functions enabled ####\n");
1561 return 0;
1562 }
1563
1564 if (!rec)
1565 return 0;
1566
1567 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1568
1569 seq_printf(m, "%s\n", str);
1570
1571 return 0;
1572 }
1573
1574 static struct seq_operations show_ftrace_seq_ops = {
1575 .start = t_start,
1576 .next = t_next,
1577 .stop = t_stop,
1578 .show = t_show,
1579 };
1580
1581 static int
1582 ftrace_avail_open(struct inode *inode, struct file *file)
1583 {
1584 struct ftrace_iterator *iter;
1585 int ret;
1586
1587 if (unlikely(ftrace_disabled))
1588 return -ENODEV;
1589
1590 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1591 if (!iter)
1592 return -ENOMEM;
1593
1594 iter->pg = ftrace_pages_start;
1595
1596 ret = seq_open(file, &show_ftrace_seq_ops);
1597 if (!ret) {
1598 struct seq_file *m = file->private_data;
1599
1600 m->private = iter;
1601 } else {
1602 kfree(iter);
1603 }
1604
1605 return ret;
1606 }
1607
1608 int ftrace_avail_release(struct inode *inode, struct file *file)
1609 {
1610 struct seq_file *m = (struct seq_file *)file->private_data;
1611 struct ftrace_iterator *iter = m->private;
1612
1613 seq_release(inode, file);
1614 kfree(iter);
1615
1616 return 0;
1617 }
1618
1619 static int
1620 ftrace_failures_open(struct inode *inode, struct file *file)
1621 {
1622 int ret;
1623 struct seq_file *m;
1624 struct ftrace_iterator *iter;
1625
1626 ret = ftrace_avail_open(inode, file);
1627 if (!ret) {
1628 m = (struct seq_file *)file->private_data;
1629 iter = (struct ftrace_iterator *)m->private;
1630 iter->flags = FTRACE_ITER_FAILURES;
1631 }
1632
1633 return ret;
1634 }
1635
1636
1637 static void ftrace_filter_reset(int enable)
1638 {
1639 struct ftrace_page *pg;
1640 struct dyn_ftrace *rec;
1641 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1642
1643 mutex_lock(&ftrace_lock);
1644 if (enable)
1645 ftrace_filtered = 0;
1646 do_for_each_ftrace_rec(pg, rec) {
1647 if (rec->flags & FTRACE_FL_FAILED)
1648 continue;
1649 rec->flags &= ~type;
1650 } while_for_each_ftrace_rec();
1651 mutex_unlock(&ftrace_lock);
1652 }
1653
1654 static int
1655 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1656 {
1657 struct ftrace_iterator *iter;
1658 int ret = 0;
1659
1660 if (unlikely(ftrace_disabled))
1661 return -ENODEV;
1662
1663 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1664 if (!iter)
1665 return -ENOMEM;
1666
1667 mutex_lock(&ftrace_regex_lock);
1668 if ((file->f_mode & FMODE_WRITE) &&
1669 !(file->f_flags & O_APPEND))
1670 ftrace_filter_reset(enable);
1671
1672 if (file->f_mode & FMODE_READ) {
1673 iter->pg = ftrace_pages_start;
1674 iter->flags = enable ? FTRACE_ITER_FILTER :
1675 FTRACE_ITER_NOTRACE;
1676
1677 ret = seq_open(file, &show_ftrace_seq_ops);
1678 if (!ret) {
1679 struct seq_file *m = file->private_data;
1680 m->private = iter;
1681 } else
1682 kfree(iter);
1683 } else
1684 file->private_data = iter;
1685 mutex_unlock(&ftrace_regex_lock);
1686
1687 return ret;
1688 }
1689
1690 static int
1691 ftrace_filter_open(struct inode *inode, struct file *file)
1692 {
1693 return ftrace_regex_open(inode, file, 1);
1694 }
1695
1696 static int
1697 ftrace_notrace_open(struct inode *inode, struct file *file)
1698 {
1699 return ftrace_regex_open(inode, file, 0);
1700 }
1701
1702 static loff_t
1703 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1704 {
1705 loff_t ret;
1706
1707 if (file->f_mode & FMODE_READ)
1708 ret = seq_lseek(file, offset, origin);
1709 else
1710 file->f_pos = ret = 1;
1711
1712 return ret;
1713 }
1714
1715 enum {
1716 MATCH_FULL,
1717 MATCH_FRONT_ONLY,
1718 MATCH_MIDDLE_ONLY,
1719 MATCH_END_ONLY,
1720 };
1721
1722 /*
1723 * (static function - no need for kernel doc)
1724 *
1725 * Pass in a buffer containing a glob and this function will
1726 * set search to point to the search part of the buffer and
1727 * return the type of search it is (see enum above).
1728 * This does modify buff.
1729 *
1730 * Returns enum type.
1731 * search returns the pointer to use for comparison.
1732 * not returns 1 if buff started with a '!'
1733 * 0 otherwise.
1734 */
1735 static int
1736 ftrace_setup_glob(char *buff, int len, char **search, int *not)
1737 {
1738 int type = MATCH_FULL;
1739 int i;
1740
1741 if (buff[0] == '!') {
1742 *not = 1;
1743 buff++;
1744 len--;
1745 } else
1746 *not = 0;
1747
1748 *search = buff;
1749
1750 for (i = 0; i < len; i++) {
1751 if (buff[i] == '*') {
1752 if (!i) {
1753 *search = buff + 1;
1754 type = MATCH_END_ONLY;
1755 } else {
1756 if (type == MATCH_END_ONLY)
1757 type = MATCH_MIDDLE_ONLY;
1758 else
1759 type = MATCH_FRONT_ONLY;
1760 buff[i] = 0;
1761 break;
1762 }
1763 }
1764 }
1765
1766 return type;
1767 }
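/*
 * Illustrative examples of the resulting parse:
 *
 *	"foo"	-> MATCH_FULL,		search = "foo"
 *	"foo*"	-> MATCH_FRONT_ONLY,	search = "foo"
 *	"*foo"	-> MATCH_END_ONLY,	search = "foo"
 *	"*foo*"	-> MATCH_MIDDLE_ONLY,	search = "foo"
 *	"!foo"	-> not = 1, then parsed as "foo"
 */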
1768
1769 static int ftrace_match(char *str, char *regex, int len, int type)
1770 {
1771 int matched = 0;
1772 char *ptr;
1773
1774 switch (type) {
1775 case MATCH_FULL:
1776 if (strcmp(str, regex) == 0)
1777 matched = 1;
1778 break;
1779 case MATCH_FRONT_ONLY:
1780 if (strncmp(str, regex, len) == 0)
1781 matched = 1;
1782 break;
1783 case MATCH_MIDDLE_ONLY:
1784 if (strstr(str, regex))
1785 matched = 1;
1786 break;
1787 case MATCH_END_ONLY:
1788 ptr = strstr(str, regex);
1789 if (ptr && (ptr[len] == 0))
1790 matched = 1;
1791 break;
1792 }
1793
1794 return matched;
1795 }
1796
1797 static int
1798 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1799 {
1800 char str[KSYM_SYMBOL_LEN];
1801
1802 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1803 return ftrace_match(str, regex, len, type);
1804 }
1805
1806 static void ftrace_match_records(char *buff, int len, int enable)
1807 {
1808 unsigned int search_len;
1809 struct ftrace_page *pg;
1810 struct dyn_ftrace *rec;
1811 unsigned long flag;
1812 char *search;
1813 int type;
1814 int not;
1815
1816 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1817 type = ftrace_setup_glob(buff, len, &search, &not);
1818
1819 search_len = strlen(search);
1820
1821 mutex_lock(&ftrace_lock);
1822 do_for_each_ftrace_rec(pg, rec) {
1823
1824 if (rec->flags & FTRACE_FL_FAILED)
1825 continue;
1826
1827 if (ftrace_match_record(rec, search, search_len, type)) {
1828 if (not)
1829 rec->flags &= ~flag;
1830 else
1831 rec->flags |= flag;
1832 }
1833 /*
1834 * Only enable filtering if we have a function that
1835 * is filtered on.
1836 */
1837 if (enable && (rec->flags & FTRACE_FL_FILTER))
1838 ftrace_filtered = 1;
1839 } while_for_each_ftrace_rec();
1840 mutex_unlock(&ftrace_lock);
1841 }
1842
1843 static int
1844 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1845 char *regex, int len, int type)
1846 {
1847 char str[KSYM_SYMBOL_LEN];
1848 char *modname;
1849
1850 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1851
1852 if (!modname || strcmp(modname, mod))
1853 return 0;
1854
1855 /* blank search means to match all funcs in the mod */
1856 if (len)
1857 return ftrace_match(str, regex, len, type);
1858 else
1859 return 1;
1860 }
1861
1862 static void ftrace_match_module_records(char *buff, char *mod, int enable)
1863 {
1864 unsigned search_len = 0;
1865 struct ftrace_page *pg;
1866 struct dyn_ftrace *rec;
1867 int type = MATCH_FULL;
1868 char *search = buff;
1869 unsigned long flag;
1870 int not = 0;
1871
1872 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1873
1874 /* blank or '*' mean the same */
1875 if (strcmp(buff, "*") == 0)
1876 buff[0] = 0;
1877
1878 /* handle the case of 'don't filter this module' */
1879 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1880 buff[0] = 0;
1881 not = 1;
1882 }
1883
1884 if (strlen(buff)) {
1885 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1886 search_len = strlen(search);
1887 }
1888
1889 mutex_lock(&ftrace_lock);
1890 do_for_each_ftrace_rec(pg, rec) {
1891
1892 if (rec->flags & FTRACE_FL_FAILED)
1893 continue;
1894
1895 if (ftrace_match_module_record(rec, mod,
1896 search, search_len, type)) {
1897 if (not)
1898 rec->flags &= ~flag;
1899 else
1900 rec->flags |= flag;
1901 }
1902 if (enable && (rec->flags & FTRACE_FL_FILTER))
1903 ftrace_filtered = 1;
1904
1905 } while_for_each_ftrace_rec();
1906 mutex_unlock(&ftrace_lock);
1907 }
1908
1909 /*
1910 * We register the module command as a template to show others how
1911 * to register a command as well.
1912 */
1913
1914 static int
1915 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1916 {
1917 char *mod;
1918
1919 /*
1920 * cmd == 'mod' because we only registered this func
1921 * for the 'mod' ftrace_func_command.
1922 * But if you register one func with multiple commands,
1923 * you can tell which command was used by the cmd
1924 * parameter.
1925 */
1926
1927 /* we must have a module name */
1928 if (!param)
1929 return -EINVAL;
1930
1931 mod = strsep(&param, ":");
1932 if (!strlen(mod))
1933 return -EINVAL;
1934
1935 ftrace_match_module_records(func, mod, enable);
1936 return 0;
1937 }
1938
1939 static struct ftrace_func_command ftrace_mod_cmd = {
1940 .name = "mod",
1941 .func = ftrace_mod_callback,
1942 };
1943
1944 static int __init ftrace_mod_cmd_init(void)
1945 {
1946 return register_ftrace_command(&ftrace_mod_cmd);
1947 }
1948 device_initcall(ftrace_mod_cmd_init);
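/*
 * A minimal sketch of adding a command of your own, following the
 * template above (the "foo" command and its callback are hypothetical):
 *
 *	static struct ftrace_func_command ftrace_foo_cmd = {
 *		.name	= "foo",
 *		.func	= ftrace_foo_callback,
 *	};
 *
 * Once registered, writing 'some_func*:foo:bar' to set_ftrace_filter
 * ends up calling ftrace_foo_callback("some_func*", "foo", "bar", 1).
 */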
1949
1950 static void
1951 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1952 {
1953 struct ftrace_func_probe *entry;
1954 struct hlist_head *hhd;
1955 struct hlist_node *n;
1956 unsigned long key;
1957 int resched;
1958
1959 key = hash_long(ip, FTRACE_HASH_BITS);
1960
1961 hhd = &ftrace_func_hash[key];
1962
1963 if (hlist_empty(hhd))
1964 return;
1965
1966 /*
1967 * Disable preemption for these calls to prevent an RCU grace
1968 * period from completing. This syncs the hash iteration with the
1969 * freeing of items on the hash. rcu_read_lock is too dangerous here.
1970 */
1971 resched = ftrace_preempt_disable();
1972 hlist_for_each_entry_rcu(entry, n, hhd, node) {
1973 if (entry->ip == ip)
1974 entry->ops->func(ip, parent_ip, &entry->data);
1975 }
1976 ftrace_preempt_enable(resched);
1977 }
1978
1979 static struct ftrace_ops trace_probe_ops __read_mostly =
1980 {
1981 .func = function_trace_probe_call,
1982 };
1983
1984 static int ftrace_probe_registered;
1985
1986 static void __enable_ftrace_function_probe(void)
1987 {
1988 int i;
1989
1990 if (ftrace_probe_registered)
1991 return;
1992
1993 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1994 struct hlist_head *hhd = &ftrace_func_hash[i];
1995 if (hhd->first)
1996 break;
1997 }
1998 /* Nothing registered? */
1999 if (i == FTRACE_FUNC_HASHSIZE)
2000 return;
2001
2002 __register_ftrace_function(&trace_probe_ops);
2003 ftrace_startup(0);
2004 ftrace_probe_registered = 1;
2005 }
2006
2007 static void __disable_ftrace_function_probe(void)
2008 {
2009 int i;
2010
2011 if (!ftrace_probe_registered)
2012 return;
2013
2014 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2015 struct hlist_head *hhd = &ftrace_func_hash[i];
2016 if (hhd->first)
2017 return;
2018 }
2019
2020 /* no more funcs left */
2021 __unregister_ftrace_function(&trace_probe_ops);
2022 ftrace_shutdown(0);
2023 ftrace_probe_registered = 0;
2024 }
2025
2026
2027 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2028 {
2029 struct ftrace_func_probe *entry =
2030 container_of(rhp, struct ftrace_func_probe, rcu);
2031
2032 if (entry->ops->free)
2033 entry->ops->free(&entry->data);
2034 kfree(entry);
2035 }
2036
2037
2038 int
2039 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2040 void *data)
2041 {
2042 struct ftrace_func_probe *entry;
2043 struct ftrace_page *pg;
2044 struct dyn_ftrace *rec;
2045 int type, len, not;
2046 unsigned long key;
2047 int count = 0;
2048 char *search;
2049
2050 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
2051 len = strlen(search);
2052
2053 /* we do not support '!' for function probes */
2054 if (WARN_ON(not))
2055 return -EINVAL;
2056
2057 mutex_lock(&ftrace_lock);
2058 do_for_each_ftrace_rec(pg, rec) {
2059
2060 if (rec->flags & FTRACE_FL_FAILED)
2061 continue;
2062
2063 if (!ftrace_match_record(rec, search, len, type))
2064 continue;
2065
2066 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2067 if (!entry) {
2068 /* If we did not process any, then return error */
2069 if (!count)
2070 count = -ENOMEM;
2071 goto out_unlock;
2072 }
2073
2074 count++;
2075
2076 entry->data = data;
2077
2078 /*
2079 * The caller might want to do something special
2080 * for each function we find. We call the callback
2081 * to give the caller an opportunity to do so.
2082 */
2083 if (ops->callback) {
2084 if (ops->callback(rec->ip, &entry->data) < 0) {
2085 /* caller does not like this func */
2086 kfree(entry);
2087 continue;
2088 }
2089 }
2090
2091 entry->ops = ops;
2092 entry->ip = rec->ip;
2093
2094 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2095 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2096
2097 } while_for_each_ftrace_rec();
2098 __enable_ftrace_function_probe();
2099
2100 out_unlock:
2101 mutex_unlock(&ftrace_lock);
2102
2103 return count;
2104 }
2105
2106 enum {
2107 PROBE_TEST_FUNC = 1,
2108 PROBE_TEST_DATA = 2
2109 };
2110
2111 static void
2112 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2113 void *data, int flags)
2114 {
2115 struct ftrace_func_probe *entry;
2116 struct hlist_node *n, *tmp;
2117 char str[KSYM_SYMBOL_LEN];
2118 int type = MATCH_FULL;
2119 int i, len = 0;
2120 char *search;
2121
2122 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2123 glob = NULL;
2124 else {
2125 int not;
2126
2127 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
2128 len = strlen(search);
2129
2130 /* we do not support '!' for function probes */
2131 if (WARN_ON(not))
2132 return;
2133 }
2134
2135 mutex_lock(&ftrace_lock);
2136 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2137 struct hlist_head *hhd = &ftrace_func_hash[i];
2138
2139 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2140
2141 /* break up if statements for readability */
2142 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2143 continue;
2144
2145 if ((flags & PROBE_TEST_DATA) && entry->data != data)
2146 continue;
2147
2148 /* do this last, since it is the most expensive */
2149 if (glob) {
2150 kallsyms_lookup(entry->ip, NULL, NULL,
2151 NULL, str);
2152 if (!ftrace_match(str, glob, len, type))
2153 continue;
2154 }
2155
2156 hlist_del(&entry->node);
2157 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2158 }
2159 }
2160 __disable_ftrace_function_probe();
2161 mutex_unlock(&ftrace_lock);
2162 }
2163
2164 void
2165 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2166 void *data)
2167 {
2168 __unregister_ftrace_function_probe(glob, ops, data,
2169 PROBE_TEST_FUNC | PROBE_TEST_DATA);
2170 }
2171
2172 void
2173 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2174 {
2175 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2176 }
2177
2178 void unregister_ftrace_function_probe_all(char *glob)
2179 {
2180 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2181 }
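/*
 * A sketch of the probe API from a caller's point of view (my_ops and
 * my_probe_func are hypothetical):
 *
 *	static struct ftrace_probe_ops my_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("sched_*", &my_ops, NULL);
 *	...
 *	unregister_ftrace_function_probe_func("sched_*", &my_ops);
 */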
2182
2183 static LIST_HEAD(ftrace_commands);
2184 static DEFINE_MUTEX(ftrace_cmd_mutex);
2185
2186 int register_ftrace_command(struct ftrace_func_command *cmd)
2187 {
2188 struct ftrace_func_command *p;
2189 int ret = 0;
2190
2191 mutex_lock(&ftrace_cmd_mutex);
2192 list_for_each_entry(p, &ftrace_commands, list) {
2193 if (strcmp(cmd->name, p->name) == 0) {
2194 ret = -EBUSY;
2195 goto out_unlock;
2196 }
2197 }
2198 list_add(&cmd->list, &ftrace_commands);
2199 out_unlock:
2200 mutex_unlock(&ftrace_cmd_mutex);
2201
2202 return ret;
2203 }
2204
2205 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2206 {
2207 struct ftrace_func_command *p, *n;
2208 int ret = -ENODEV;
2209
2210 mutex_lock(&ftrace_cmd_mutex);
2211 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2212 if (strcmp(cmd->name, p->name) == 0) {
2213 ret = 0;
2214 list_del_init(&p->list);
2215 goto out_unlock;
2216 }
2217 }
2218 out_unlock:
2219 mutex_unlock(&ftrace_cmd_mutex);
2220
2221 return ret;
2222 }
2223
2224 static int ftrace_process_regex(char *buff, int len, int enable)
2225 {
2226 char *func, *command, *next = buff;
2227 struct ftrace_func_command *p;
2228 int ret = -EINVAL;
2229
2230 func = strsep(&next, ":");
2231
2232 if (!next) {
2233 ftrace_match_records(func, len, enable);
2234 return 0;
2235 }
2236
2237 /* command found */
2238
2239 command = strsep(&next, ":");
2240
2241 mutex_lock(&ftrace_cmd_mutex);
2242 list_for_each_entry(p, &ftrace_commands, list) {
2243 if (strcmp(p->name, command) == 0) {
2244 ret = p->func(func, command, next, enable);
2245 goto out_unlock;
2246 }
2247 }
2248 out_unlock:
2249 mutex_unlock(&ftrace_cmd_mutex);
2250
2251 return ret;
2252 }
2253
2254 static ssize_t
2255 ftrace_regex_write(struct file *file, const char __user *ubuf,
2256 size_t cnt, loff_t *ppos, int enable)
2257 {
2258 struct ftrace_iterator *iter;
2259 char ch;
2260 size_t read = 0;
2261 ssize_t ret;
2262
2263 if (!cnt)
2264 return 0;
2265
2266 mutex_lock(&ftrace_regex_lock);
2267
2268 if (file->f_mode & FMODE_READ) {
2269 struct seq_file *m = file->private_data;
2270 iter = m->private;
2271 } else
2272 iter = file->private_data;
2273
2274 if (!*ppos) {
2275 iter->flags &= ~FTRACE_ITER_CONT;
2276 iter->buffer_idx = 0;
2277 }
2278
2279 ret = get_user(ch, ubuf++);
2280 if (ret)
2281 goto out;
2282 read++;
2283 cnt--;
2284
2285 if (!(iter->flags & FTRACE_ITER_CONT)) {
2286 /* skip white space */
2287 while (cnt && isspace(ch)) {
2288 ret = get_user(ch, ubuf++);
2289 if (ret)
2290 goto out;
2291 read++;
2292 cnt--;
2293 }
2294
2295 if (isspace(ch)) {
2296 *ppos += read;
2297 ret = read;
2298 goto out;
2299 }
2300
2301 iter->buffer_idx = 0;
2302 }
2303
2304 while (cnt && !isspace(ch)) {
2305 if (iter->buffer_idx < FTRACE_BUFF_MAX)
2306 iter->buffer[iter->buffer_idx++] = ch;
2307 else {
2308 ret = -EINVAL;
2309 goto out;
2310 }
2311 ret = get_user(ch, ubuf++);
2312 if (ret)
2313 goto out;
2314 read++;
2315 cnt--;
2316 }
2317
2318 if (isspace(ch)) {
2319 iter->filtered++;
2320 iter->buffer[iter->buffer_idx] = 0;
2321 ret = ftrace_process_regex(iter->buffer,
2322 iter->buffer_idx, enable);
2323 if (ret)
2324 goto out;
2325 iter->buffer_idx = 0;
2326 } else
2327 iter->flags |= FTRACE_ITER_CONT;
2328
2329
2330 *ppos += read;
2331
2332 ret = read;
2333 out:
2334 mutex_unlock(&ftrace_regex_lock);
2335
2336 return ret;
2337 }
2338
2339 static ssize_t
2340 ftrace_filter_write(struct file *file, const char __user *ubuf,
2341 size_t cnt, loff_t *ppos)
2342 {
2343 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2344 }
2345
2346 static ssize_t
2347 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2348 size_t cnt, loff_t *ppos)
2349 {
2350 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2351 }
2352
2353 static void
2354 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2355 {
2356 if (unlikely(ftrace_disabled))
2357 return;
2358
2359 mutex_lock(&ftrace_regex_lock);
2360 if (reset)
2361 ftrace_filter_reset(enable);
2362 if (buf)
2363 ftrace_match_records(buf, len, enable);
2364 mutex_unlock(&ftrace_regex_lock);
2365 }
2366
2367 /**
2368 * ftrace_set_filter - set a function to filter on in ftrace
2369 * @buf - the string that holds the function filter text.
2370 * @len - the length of the string.
2371 * @reset - non-zero to reset all filters before applying this filter.
2372 *
2373 * Filters denote which functions should be enabled when tracing is enabled.
2374 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2375 */
2376 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2377 {
2378 ftrace_set_regex(buf, len, reset, 1);
2379 }
2380
2381 /**
2382 * ftrace_set_notrace - set a function to not trace in ftrace
2383 * @buf - the string that holds the function notrace text.
2384 * @len - the length of the string.
2385 * @reset - non-zero to reset all filters before applying this filter.
2386 *
2387 * Notrace filters denote which functions should not be traced when tracing
2388 * is enabled. If @buf is NULL and @reset is set, the notrace list is
2389 * cleared, so no function is excluded from tracing.
2390 */
2391 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2392 {
2393 ftrace_set_regex(buf, len, reset, 0);
2394 }
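
/*
 * A minimal sketch of combining the two setters above from a tracer's init
 * path. The globs are arbitrary examples; both calls pass reset=1 to drop
 * any previously installed list first.
 */
static void example_setup_filters(void)
{
	unsigned char filter[] = "sched_*";
	unsigned char skip[] = "*_lock";

	ftrace_set_filter(filter, strlen((char *)filter), 1);
	ftrace_set_notrace(skip, strlen((char *)skip), 1);
}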
2395
2396 static int
2397 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
2398 {
2399 struct seq_file *m = (struct seq_file *)file->private_data;
2400 struct ftrace_iterator *iter;
2401
2402 mutex_lock(&ftrace_regex_lock);
2403 if (file->f_mode & FMODE_READ) {
2404 iter = m->private;
2405
2406 seq_release(inode, file);
2407 } else
2408 iter = file->private_data;
2409
2410 if (iter->buffer_idx) {
2411 iter->filtered++;
2412 iter->buffer[iter->buffer_idx] = 0;
2413 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
2414 }
2415
2416 mutex_lock(&ftrace_lock);
2417 if (ftrace_start_up && ftrace_enabled)
2418 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2419 mutex_unlock(&ftrace_lock);
2420
2421 kfree(iter);
2422 mutex_unlock(&ftrace_regex_lock);
2423 return 0;
2424 }
2425
2426 static int
2427 ftrace_filter_release(struct inode *inode, struct file *file)
2428 {
2429 return ftrace_regex_release(inode, file, 1);
2430 }
2431
2432 static int
2433 ftrace_notrace_release(struct inode *inode, struct file *file)
2434 {
2435 return ftrace_regex_release(inode, file, 0);
2436 }
2437
2438 static const struct file_operations ftrace_avail_fops = {
2439 .open = ftrace_avail_open,
2440 .read = seq_read,
2441 .llseek = seq_lseek,
2442 .release = ftrace_avail_release,
2443 };
2444
2445 static const struct file_operations ftrace_failures_fops = {
2446 .open = ftrace_failures_open,
2447 .read = seq_read,
2448 .llseek = seq_lseek,
2449 .release = ftrace_avail_release,
2450 };
2451
2452 static const struct file_operations ftrace_filter_fops = {
2453 .open = ftrace_filter_open,
2454 .read = seq_read,
2455 .write = ftrace_filter_write,
2456 .llseek = ftrace_regex_lseek,
2457 .release = ftrace_filter_release,
2458 };
2459
2460 static const struct file_operations ftrace_notrace_fops = {
2461 .open = ftrace_notrace_open,
2462 .read = seq_read,
2463 .write = ftrace_notrace_write,
2464 .llseek = ftrace_regex_lseek,
2465 .release = ftrace_notrace_release,
2466 };
2467
2468 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2469
2470 static DEFINE_MUTEX(graph_lock);
2471
2472 int ftrace_graph_count;
2473 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2474
2475 static void *
2476 g_next(struct seq_file *m, void *v, loff_t *pos)
2477 {
2478 unsigned long *array = m->private;
2479 int index = *pos;
2480
2481 (*pos)++;
2482
2483 if (index >= ftrace_graph_count)
2484 return NULL;
2485
2486 return &array[index];
2487 }
2488
2489 static void *g_start(struct seq_file *m, loff_t *pos)
2490 {
2491 void *p = NULL;
2492
2493 mutex_lock(&graph_lock);
2494
2495 /* Nothing listed: have g_show report that all functions are enabled */
2496 if (!ftrace_graph_count && !*pos)
2497 return (void *)1;
2498
2499 p = g_next(m, p, pos);
2500
2501 return p;
2502 }
2503
2504 static void g_stop(struct seq_file *m, void *p)
2505 {
2506 mutex_unlock(&graph_lock);
2507 }
2508
2509 static int g_show(struct seq_file *m, void *v)
2510 {
2511 unsigned long *ptr = v;
2512 char str[KSYM_SYMBOL_LEN];
2513
2514 if (!ptr)
2515 return 0;
2516
2517 if (ptr == (unsigned long *)1) {
2518 seq_printf(m, "#### all functions enabled ####\n");
2519 return 0;
2520 }
2521
2522 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
2523
2524 seq_printf(m, "%s\n", str);
2525
2526 return 0;
2527 }
2528
2529 static const struct seq_operations ftrace_graph_seq_ops = {
2530 .start = g_start,
2531 .next = g_next,
2532 .stop = g_stop,
2533 .show = g_show,
2534 };
2535
2536 static int
2537 ftrace_graph_open(struct inode *inode, struct file *file)
2538 {
2539 int ret = 0;
2540
2541 if (unlikely(ftrace_disabled))
2542 return -ENODEV;
2543
2544 mutex_lock(&graph_lock);
2545 if ((file->f_mode & FMODE_WRITE) &&
2546 !(file->f_flags & O_APPEND)) {
2547 ftrace_graph_count = 0;
2548 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2549 }
2550
2551 if (file->f_mode & FMODE_READ) {
2552 ret = seq_open(file, &ftrace_graph_seq_ops);
2553 if (!ret) {
2554 struct seq_file *m = file->private_data;
2555 m->private = ftrace_graph_funcs;
2556 }
2557 } else
2558 file->private_data = ftrace_graph_funcs;
2559 mutex_unlock(&graph_lock);
2560
2561 return ret;
2562 }
2563
2564 static int
2565 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2566 {
2567 struct dyn_ftrace *rec;
2568 struct ftrace_page *pg;
2569 int search_len;
2570 int found = 0;
2571 int type, not;
2572 char *search;
2573 bool exists;
2574 int i;
2575
2576 if (ftrace_disabled)
2577 return -ENODEV;
2578
2579 /* decode regex */
2580 type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
2581 if (not)
2582 return -EINVAL;
2583
2584 search_len = strlen(search);
2585
2586 mutex_lock(&ftrace_lock);
2587 do_for_each_ftrace_rec(pg, rec) {
2588
2589 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2590 break;
2591
2592 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2593 continue;
2594
2595 if (ftrace_match_record(rec, search, search_len, type)) {
2596 /* ensure it is not already in the array */
2597 exists = false;
2598 for (i = 0; i < *idx; i++)
2599 if (array[i] == rec->ip) {
2600 exists = true;
2601 break;
2602 }
2603 if (!exists) {
2604 array[(*idx)++] = rec->ip;
2605 found = 1;
2606 }
2607 }
2608 } while_for_each_ftrace_rec();
2609
2610 mutex_unlock(&ftrace_lock);
2611
2612 return found ? 0 : -EINVAL;
2613 }
2614
2615 static ssize_t
2616 ftrace_graph_write(struct file *file, const char __user *ubuf,
2617 size_t cnt, loff_t *ppos)
2618 {
2619 unsigned char buffer[FTRACE_BUFF_MAX+1];
2620 unsigned long *array;
2621 size_t read = 0;
2622 ssize_t ret;
2623 int index = 0;
2624 char ch;
2625
2626 if (!cnt)
2627 return 0;
2628
2629 mutex_lock(&graph_lock);
2630
2631 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2632 ret = -EBUSY;
2633 goto out;
2634 }
2635
2636 if (file->f_mode & FMODE_READ) {
2637 struct seq_file *m = file->private_data;
2638 array = m->private;
2639 } else
2640 array = file->private_data;
2641
2642 ret = get_user(ch, ubuf++);
2643 if (ret)
2644 goto out;
2645 read++;
2646 cnt--;
2647
2648 /* skip white space */
2649 while (cnt && isspace(ch)) {
2650 ret = get_user(ch, ubuf++);
2651 if (ret)
2652 goto out;
2653 read++;
2654 cnt--;
2655 }
2656
2657 if (isspace(ch)) {
2658 *ppos += read;
2659 ret = read;
2660 goto out;
2661 }
2662
2663 while (cnt && !isspace(ch)) {
2664 if (index < FTRACE_BUFF_MAX)
2665 buffer[index++] = ch;
2666 else {
2667 ret = -EINVAL;
2668 goto out;
2669 }
2670 ret = get_user(ch, ubuf++);
2671 if (ret)
2672 goto out;
2673 read++;
2674 cnt--;
2675 }
2676 buffer[index] = 0;
2677
2678 /* we allow only one expression at a time */
2679 ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
2680 if (ret)
2681 goto out;
2682
2683 *ppos += read;
2684
2685 ret = read;
2686 out:
2687 mutex_unlock(&graph_lock);
2688
2689 return ret;
2690 }
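
/*
 * One expression per write is enforced above. A user-space sketch of
 * appending one function to the graph filter (debugfs assumed mounted at
 * /sys/kernel/debug; O_APPEND keeps entries added earlier, since
 * ftrace_graph_open() clears the array on a plain write open):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int graph_trace_one(const char *name)
 *	{
 *		int fd = open("/sys/kernel/debug/tracing/set_graph_function",
 *			      O_WRONLY | O_APPEND);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = write(fd, name, strlen(name));
 *		close(fd);
 *		return n < 0 ? -1 : 0;
 *	}
 */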
2691
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	/* readers were given a seq_file by ftrace_graph_open(); drop it here */
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
	.release = ftrace_graph_release,
};
2697 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2698
2699 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2700 {
2702 trace_create_file("available_filter_functions", 0444,
2703 d_tracer, NULL, &ftrace_avail_fops);
2704
2705 trace_create_file("failures", 0444,
2706 d_tracer, NULL, &ftrace_failures_fops);
2707
2708 trace_create_file("set_ftrace_filter", 0644, d_tracer,
2709 NULL, &ftrace_filter_fops);
2710
2711 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
2712 NULL, &ftrace_notrace_fops);
2713
2714 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2715 trace_create_file("set_graph_function", 0644, d_tracer,
2716 NULL,
2717 &ftrace_graph_fops);
2718 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2719
2720 return 0;
2721 }
2722
2723 static int ftrace_convert_nops(struct module *mod,
2724 unsigned long *start,
2725 unsigned long *end)
2726 {
2727 unsigned long *p;
2728 unsigned long addr;
2729 unsigned long flags;
2730
2731 mutex_lock(&ftrace_lock);
2732 p = start;
2733 while (p < end) {
2734 addr = ftrace_call_adjust(*p++);
2735 /*
2736 * Some architecture linkers will pad between
2737 * the different mcount_loc sections of different
2738 * object files to satisfy alignments.
2739 * Skip any NULL pointers.
2740 */
2741 if (!addr)
2742 continue;
2743 ftrace_record_ip(addr);
2744 }
2745
2746 /* disable interrupts to prevent kstop machine */
2747 local_irq_save(flags);
2748 ftrace_update_code(mod);
2749 local_irq_restore(flags);
2750 mutex_unlock(&ftrace_lock);
2751
2752 return 0;
2753 }
2754
2755 void ftrace_init_module(struct module *mod,
2756 unsigned long *start, unsigned long *end)
2757 {
2758 if (ftrace_disabled || start == end)
2759 return;
2760 ftrace_convert_nops(mod, start, end);
2761 }
2762
2763 extern unsigned long __start_mcount_loc[];
2764 extern unsigned long __stop_mcount_loc[];
2765
2766 void __init ftrace_init(void)
2767 {
2768 unsigned long count, addr, flags;
2769 int ret;
2770
2771 /* Keep the ftrace pointer to the stub */
2772 addr = (unsigned long)ftrace_stub;
2773
2774 local_irq_save(flags);
2775 ftrace_dyn_arch_init(&addr);
2776 local_irq_restore(flags);
2777
2778 /* ftrace_dyn_arch_init places the return code in addr */
2779 if (addr)
2780 goto failed;
2781
2782 count = __stop_mcount_loc - __start_mcount_loc;
2783
2784 ret = ftrace_dyn_table_alloc(count);
2785 if (ret)
2786 goto failed;
2787
2788 last_ftrace_enabled = ftrace_enabled = 1;
2789
2790 ret = ftrace_convert_nops(NULL,
2791 __start_mcount_loc,
2792 __stop_mcount_loc);
2793
2794 return;
2795 failed:
2796 ftrace_disabled = 1;
2797 }
2798
2799 #else
2800
2801 static int __init ftrace_nodyn_init(void)
2802 {
2803 ftrace_enabled = 1;
2804 return 0;
2805 }
2806 device_initcall(ftrace_nodyn_init);
2807
2808 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2809 static inline void ftrace_startup_enable(int command) { }
2810 /* Keep as macros so we do not need to define the commands */
2811 # define ftrace_startup(command) do { } while (0)
2812 # define ftrace_shutdown(command) do { } while (0)
2813 # define ftrace_startup_sysctl() do { } while (0)
2814 # define ftrace_shutdown_sysctl() do { } while (0)
2815 #endif /* CONFIG_DYNAMIC_FTRACE */
2816
2817 static ssize_t
2818 ftrace_pid_read(struct file *file, char __user *ubuf,
2819 size_t cnt, loff_t *ppos)
2820 {
2821 char buf[64];
2822 int r;
2823
2824 if (ftrace_pid_trace == ftrace_swapper_pid)
2825 r = sprintf(buf, "swapper tasks\n");
2826 else if (ftrace_pid_trace)
2827 r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
2828 else
2829 r = sprintf(buf, "no pid\n");
2830
2831 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2832 }
2833
2834 static void clear_ftrace_swapper(void)
2835 {
2836 struct task_struct *p;
2837 int cpu;
2838
2839 get_online_cpus();
2840 for_each_online_cpu(cpu) {
2841 p = idle_task(cpu);
2842 clear_tsk_trace_trace(p);
2843 }
2844 put_online_cpus();
2845 }
2846
2847 static void set_ftrace_swapper(void)
2848 {
2849 struct task_struct *p;
2850 int cpu;
2851
2852 get_online_cpus();
2853 for_each_online_cpu(cpu) {
2854 p = idle_task(cpu);
2855 set_tsk_trace_trace(p);
2856 }
2857 put_online_cpus();
2858 }
2859
2860 static void clear_ftrace_pid(struct pid *pid)
2861 {
2862 struct task_struct *p;
2863
2864 rcu_read_lock();
2865 do_each_pid_task(pid, PIDTYPE_PID, p) {
2866 clear_tsk_trace_trace(p);
2867 } while_each_pid_task(pid, PIDTYPE_PID, p);
2868 rcu_read_unlock();
2869
2870 put_pid(pid);
2871 }
2872
2873 static void set_ftrace_pid(struct pid *pid)
2874 {
2875 struct task_struct *p;
2876
2877 rcu_read_lock();
2878 do_each_pid_task(pid, PIDTYPE_PID, p) {
2879 set_tsk_trace_trace(p);
2880 } while_each_pid_task(pid, PIDTYPE_PID, p);
2881 rcu_read_unlock();
2882 }
2883
2884 static void clear_ftrace_pid_task(struct pid **pid)
2885 {
2886 if (*pid == ftrace_swapper_pid)
2887 clear_ftrace_swapper();
2888 else
2889 clear_ftrace_pid(*pid);
2890
2891 *pid = NULL;
2892 }
2893
2894 static void set_ftrace_pid_task(struct pid *pid)
2895 {
2896 if (pid == ftrace_swapper_pid)
2897 set_ftrace_swapper();
2898 else
2899 set_ftrace_pid(pid);
2900 }
2901
2902 static ssize_t
2903 ftrace_pid_write(struct file *filp, const char __user *ubuf,
2904 size_t cnt, loff_t *ppos)
2905 {
2906 struct pid *pid;
2907 char buf[64];
2908 long val;
2909 int ret;
2910
2911 if (cnt >= sizeof(buf))
2912 return -EINVAL;
2913
2914 if (copy_from_user(buf, ubuf, cnt))
2915 return -EFAULT;
2916
2917 buf[cnt] = 0;
2918
2919 ret = strict_strtol(buf, 10, &val);
2920 if (ret < 0)
2921 return ret;
2922
2923 mutex_lock(&ftrace_lock);
2924 if (val < 0) {
2925 /* disable pid tracing */
2926 if (!ftrace_pid_trace)
2927 goto out;
2928
2929 clear_ftrace_pid_task(&ftrace_pid_trace);
2930
2931 } else {
2932 /* swapper task is special */
2933 if (!val) {
2934 pid = ftrace_swapper_pid;
2935 if (pid == ftrace_pid_trace)
2936 goto out;
2937 } else {
2938 pid = find_get_pid(val);
2939
2940 if (pid == ftrace_pid_trace) {
2941 put_pid(pid);
2942 goto out;
2943 }
2944 }
2945
2946 if (ftrace_pid_trace)
2947 clear_ftrace_pid_task(&ftrace_pid_trace);
2948
2949 if (!pid)
2950 goto out;
2951
2952 ftrace_pid_trace = pid;
2953
2954 set_ftrace_pid_task(ftrace_pid_trace);
2955 }
2956
2957 /* update the function call */
2958 ftrace_update_pid_func();
2959 ftrace_startup_enable(0);
2960
2961 out:
2962 mutex_unlock(&ftrace_lock);
2963
2964 return cnt;
2965 }
2966
2967 static const struct file_operations ftrace_pid_fops = {
2968 .read = ftrace_pid_read,
2969 .write = ftrace_pid_write,
2970 };
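
/*
 * Sketch of exercising the pid file from user space, mirroring the
 * semantics of ftrace_pid_write() above: a positive pid restricts tracing
 * to that task, 0 selects the idle (swapper) tasks and a negative value
 * disables pid filtering (debugfs path assumed):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int set_trace_pid(long pid)
 *	{
 *		char buf[32];
 *		int len, ret, fd;
 *
 *		fd = open("/sys/kernel/debug/tracing/set_ftrace_pid", O_WRONLY);
 *		if (fd < 0)
 *			return -1;
 *		len = snprintf(buf, sizeof(buf), "%ld", pid);
 *		ret = write(fd, buf, len) == len ? 0 : -1;
 *		close(fd);
 *		return ret;
 *	}
 */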
2971
2972 static __init int ftrace_init_debugfs(void)
2973 {
2974 struct dentry *d_tracer;
2975
2976 d_tracer = tracing_init_dentry();
2977 if (!d_tracer)
2978 return 0;
2979
2980 ftrace_init_dyn_debugfs(d_tracer);
2981
2982 trace_create_file("set_ftrace_pid", 0644, d_tracer,
2983 NULL, &ftrace_pid_fops);
2984
2985 ftrace_profile_debugfs(d_tracer);
2986
2987 return 0;
2988 }
2989 fs_initcall(ftrace_init_debugfs);
2990
2991 /**
2992 * ftrace_kill - kill ftrace
2993 *
2994 * This function should be used by panic code. It stops ftrace
2995 * but in a not so nice way. If you simply need to stop tracing
2996 * from a non-atomic section, unregister the ftrace_ops instead.
2997 */
2998 void ftrace_kill(void)
2999 {
3000 ftrace_disabled = 1;
3001 ftrace_enabled = 0;
3002 clear_ftrace_function();
3003 }
3004
3005 /**
3006 * register_ftrace_function - register a function for profiling
3007 * @ops - ops structure that holds the function for profiling.
3008 *
3009 * Register a function to be called by all functions in the
3010 * kernel.
3011 *
3012 * Note: @ops->func and all the functions it calls must be labeled
3013 * with "notrace", otherwise it will go into a
3014 * recursive loop.
3015 */
3016 int register_ftrace_function(struct ftrace_ops *ops)
3017 {
3018 int ret;
3019
3020 if (unlikely(ftrace_disabled))
3021 return -ENODEV;
3022
3023 mutex_lock(&ftrace_lock);
3024
3025 ret = __register_ftrace_function(ops);
3026 ftrace_startup(0);
3027
3028 mutex_unlock(&ftrace_lock);
3029 return ret;
3030 }
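
/*
 * Minimal sketch of a client of this interface; the callback and ops are
 * invented for illustration. Note the notrace marking required by the
 * comment above.
 */
static void notrace example_trace_func(unsigned long ip,
				       unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip its call site */
}

static struct ftrace_ops example_trace_ops __read_mostly = {
	.func = example_trace_func,
};

/*
 * Pairing: register_ftrace_function(&example_trace_ops) to start,
 * unregister_ftrace_function(&example_trace_ops) to stop.
 */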
3031
3032 /**
3033 * unregister_ftrace_function - unregister a function for profiling.
3034 * @ops - ops structure that holds the function to unregister
3035 *
3036 * Unregister a function that was added to be called by ftrace profiling.
3037 */
3038 int unregister_ftrace_function(struct ftrace_ops *ops)
3039 {
3040 int ret;
3041
3042 mutex_lock(&ftrace_lock);
3043 ret = __unregister_ftrace_function(ops);
3044 ftrace_shutdown(0);
3045 mutex_unlock(&ftrace_lock);
3046
3047 return ret;
3048 }
3049
3050 int
3051 ftrace_enable_sysctl(struct ctl_table *table, int write,
3052 struct file *file, void __user *buffer, size_t *lenp,
3053 loff_t *ppos)
3054 {
3055 int ret;
3056
3057 if (unlikely(ftrace_disabled))
3058 return -ENODEV;
3059
3060 mutex_lock(&ftrace_lock);
3061
3062 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
3063
3064 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
3065 goto out;
3066
3067 last_ftrace_enabled = ftrace_enabled;
3068
3069 if (ftrace_enabled) {
3070
3071 ftrace_startup_sysctl();
3072
3073 /* we are starting ftrace again */
3074 if (ftrace_list != &ftrace_list_end) {
3075 if (ftrace_list->next == &ftrace_list_end)
3076 ftrace_trace_function = ftrace_list->func;
3077 else
3078 ftrace_trace_function = ftrace_list_func;
3079 }
3080
3081 } else {
3082 /* stopping ftrace calls (just send to ftrace_stub) */
3083 ftrace_trace_function = ftrace_stub;
3084
3085 ftrace_shutdown_sysctl();
3086 }
3087
3088 out:
3089 mutex_unlock(&ftrace_lock);
3090 return ret;
3091 }
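
/*
 * This handler backs the kernel.ftrace_enabled sysctl. A user-space sketch
 * of flipping it (procfs path assumed):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int set_ftrace_enabled(int on)
 *	{
 *		int fd = open("/proc/sys/kernel/ftrace_enabled", O_WRONLY);
 *		int ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = write(fd, on ? "1" : "0", 1) == 1 ? 0 : -1;
 *		close(fd);
 *		return ret;
 *	}
 */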
3092
3093 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3094
3095 static atomic_t ftrace_graph_active;
3096 static struct notifier_block ftrace_suspend_notifier;
3097
3098 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3099 {
3100 return 0;
3101 }
3102
3103 /* The callbacks that hook a function */
3104 trace_func_graph_ret_t ftrace_graph_return =
3105 (trace_func_graph_ret_t)ftrace_stub;
3106 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3107
3108 /* Try to assign return stacks to tasks, FTRACE_RETSTACK_ALLOC_SIZE at a time. */
3109 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3110 {
3111 int i;
3112 int ret = 0;
3113 unsigned long flags;
3114 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3115 struct task_struct *g, *t;
3116
3117 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3118 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3119 * sizeof(struct ftrace_ret_stack),
3120 GFP_KERNEL);
3121 if (!ret_stack_list[i]) {
3122 start = 0;
3123 end = i;
3124 ret = -ENOMEM;
3125 goto free;
3126 }
3127 }
3128
3129 read_lock_irqsave(&tasklist_lock, flags);
3130 do_each_thread(g, t) {
3131 if (start == end) {
3132 ret = -EAGAIN;
3133 goto unlock;
3134 }
3135
3136 if (t->ret_stack == NULL) {
3137 t->curr_ret_stack = -1;
3138 /* Make sure IRQs see the -1 first: */
3139 barrier();
3140 t->ret_stack = ret_stack_list[start++];
3141 atomic_set(&t->tracing_graph_pause, 0);
3142 atomic_set(&t->trace_overrun, 0);
3143 }
3144 } while_each_thread(g, t);
3145
3146 unlock:
3147 read_unlock_irqrestore(&tasklist_lock, flags);
3148 free:
3149 for (i = start; i < end; i++)
3150 kfree(ret_stack_list[i]);
3151 return ret;
3152 }
3153
3154 static void
3155 ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
3156 struct task_struct *next)
3157 {
3158 unsigned long long timestamp;
3159 int index;
3160
3161 /*
3162 * Does the user want to count the time a function was asleep?
3163 * If so, do not update the time stamps.
3164 */
3165 if (trace_flags & TRACE_ITER_SLEEP_TIME)
3166 return;
3167
3168 timestamp = trace_clock_local();
3169
3170 prev->ftrace_timestamp = timestamp;
3171
3172 /* only process tasks that we timestamped */
3173 if (!next->ftrace_timestamp)
3174 return;
3175
3176 /*
3177 * Update all the counters in next to make up for the
3178 * time next was sleeping.
3179 */
3180 timestamp -= next->ftrace_timestamp;
3181
3182 for (index = next->curr_ret_stack; index >= 0; index--)
3183 next->ret_stack[index].calltime += timestamp;
3184 }
3185
3186 /* Allocate a return stack for each task */
3187 static int start_graph_tracing(void)
3188 {
3189 struct ftrace_ret_stack **ret_stack_list;
3190 int ret, cpu;
3191
3192 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3193 sizeof(struct ftrace_ret_stack *),
3194 GFP_KERNEL);
3195
3196 if (!ret_stack_list)
3197 return -ENOMEM;
3198
3199 /* The ret_stack of each cpu's idle (boot) task is never freed */
3200 for_each_online_cpu(cpu)
3201 ftrace_graph_init_task(idle_task(cpu));
3202
3203 do {
3204 ret = alloc_retstack_tasklist(ret_stack_list);
3205 } while (ret == -EAGAIN);
3206
3207 if (!ret) {
3208 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
3209 if (ret)
3210 pr_info("ftrace_graph: Couldn't activate tracepoint"
3211 " probe to kernel_sched_switch\n");
3212 }
3213
3214 kfree(ret_stack_list);
3215 return ret;
3216 }
3217
3218 /*
3219 * Hibernation protection.
3220 * The state of the current task is too unstable while suspending
3221 * to or resuming from disk. We want to protect against that.
3222 */
3223 static int
3224 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3225 void *unused)
3226 {
3227 switch (state) {
3228 case PM_HIBERNATION_PREPARE:
3229 pause_graph_tracing();
3230 break;
3231
3232 case PM_POST_HIBERNATION:
3233 unpause_graph_tracing();
3234 break;
3235 }
3236 return NOTIFY_DONE;
3237 }
3238
3239 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3240 trace_func_graph_ent_t entryfunc)
3241 {
3242 int ret = 0;
3243
3244 mutex_lock(&ftrace_lock);
3245
3246 /* we currently allow only one tracer registered at a time */
3247 if (atomic_read(&ftrace_graph_active)) {
3248 ret = -EBUSY;
3249 goto out;
3250 }
3251
3252 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3253 register_pm_notifier(&ftrace_suspend_notifier);
3254
3255 atomic_inc(&ftrace_graph_active);
3256 ret = start_graph_tracing();
3257 if (ret) {
3258 atomic_dec(&ftrace_graph_active);
3259 goto out;
3260 }
3261
3262 ftrace_graph_return = retfunc;
3263 ftrace_graph_entry = entryfunc;
3264
3265 ftrace_startup(FTRACE_START_FUNC_RET);
3266
3267 out:
3268 mutex_unlock(&ftrace_lock);
3269 return ret;
3270 }
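
/*
 * Sketch of the callback pair this interface expects; the names are
 * invented for illustration. A nonzero return from the entry handler asks
 * the core to hook the function's return, zero skips that call.
 */
static int notrace example_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* hook every function entry */
}

static void notrace example_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the time spent, in ns */
}

/* register_ftrace_graph(example_graph_return, example_graph_entry); */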
3271
3272 void unregister_ftrace_graph(void)
3273 {
3274 mutex_lock(&ftrace_lock);
3275
3276 if (unlikely(!atomic_read(&ftrace_graph_active)))
3277 goto out;
3278
3279 atomic_dec(&ftrace_graph_active);
3280 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
3281 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3282 ftrace_graph_entry = ftrace_graph_entry_stub;
3283 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
3284 unregister_pm_notifier(&ftrace_suspend_notifier);
3285
3286 out:
3287 mutex_unlock(&ftrace_lock);
3288 }
3289
3290 /* Allocate a return stack for newly created task */
3291 void ftrace_graph_init_task(struct task_struct *t)
3292 {
3293 if (atomic_read(&ftrace_graph_active)) {
3294 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3295 * sizeof(struct ftrace_ret_stack),
3296 GFP_KERNEL);
3297 if (!t->ret_stack)
3298 return;
3299 t->curr_ret_stack = -1;
3300 atomic_set(&t->tracing_graph_pause, 0);
3301 atomic_set(&t->trace_overrun, 0);
3302 t->ftrace_timestamp = 0;
3303 } else
3304 t->ret_stack = NULL;
3305 }
3306
3307 void ftrace_graph_exit_task(struct task_struct *t)
3308 {
3309 struct ftrace_ret_stack *ret_stack = t->ret_stack;
3310
3311 t->ret_stack = NULL;
3312 /* NULL must become visible to IRQs before we free it: */
3313 barrier();
3314
3315 kfree(ret_stack);
3316 }
3317
3318 void ftrace_graph_stop(void)
3319 {
3320 ftrace_stop();
3321 }
3322 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3323