tracing: remove on the fly allocator from function profiler
kernel/trace/ftrace.c
1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/kprobes.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31
32 #include <trace/sched.h>
33
34 #include <asm/ftrace.h>
35
36 #include "trace_output.h"
37 #include "trace_stat.h"
38
39 #define FTRACE_WARN_ON(cond) \
40 do { \
41 if (WARN_ON(cond)) \
42 ftrace_kill(); \
43 } while (0)
44
45 #define FTRACE_WARN_ON_ONCE(cond) \
46 do { \
47 if (WARN_ON_ONCE(cond)) \
48 ftrace_kill(); \
49 } while (0)
50
51 /* hash bits for specific function selection */
52 #define FTRACE_HASH_BITS 7
53 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
54
55 /* ftrace_enabled is a method to turn ftrace on or off */
56 int ftrace_enabled __read_mostly;
57 static int last_ftrace_enabled;
58
59 /* Quick disabling of function tracer. */
60 int function_trace_stop;
61
62 /*
63 * ftrace_disabled is set when an anomaly is discovered.
64 * ftrace_disabled is much stronger than ftrace_enabled.
65 */
66 static int ftrace_disabled __read_mostly;
67
68 static DEFINE_MUTEX(ftrace_lock);
69
70 static struct ftrace_ops ftrace_list_end __read_mostly =
71 {
72 .func = ftrace_stub,
73 };
74
75 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
76 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
77 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
78 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
79
80 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
81 {
82 struct ftrace_ops *op = ftrace_list;
83
84 /* in case someone actually ports this to alpha! */
85 read_barrier_depends();
86
87 while (op != &ftrace_list_end) {
88 /* silly alpha */
89 read_barrier_depends();
90 op->func(ip, parent_ip);
91 op = op->next;
92 }
93 }
94
95 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
96 {
97 if (!test_tsk_trace_trace(current))
98 return;
99
100 ftrace_pid_function(ip, parent_ip);
101 }
102
103 static void set_ftrace_pid_function(ftrace_func_t func)
104 {
105 /* do not set ftrace_pid_function to itself! */
106 if (func != ftrace_pid_func)
107 ftrace_pid_function = func;
108 }
109
110 /**
111 * clear_ftrace_function - reset the ftrace function
112 *
113 * This NULLs the ftrace function and in essence stops
114 * tracing. There may be a lag between this call and tracing actually stopping.
115 */
116 void clear_ftrace_function(void)
117 {
118 ftrace_trace_function = ftrace_stub;
119 __ftrace_trace_function = ftrace_stub;
120 ftrace_pid_function = ftrace_stub;
121 }
122
123 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
124 /*
125 * For those archs that do not test function_trace_stop in their
126 * mcount call site, we need to do it from C.
127 */
128 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
129 {
130 if (function_trace_stop)
131 return;
132
133 __ftrace_trace_function(ip, parent_ip);
134 }
135 #endif
136
137 static int __register_ftrace_function(struct ftrace_ops *ops)
138 {
139 ops->next = ftrace_list;
140 /*
141 * We are entering ops into the ftrace_list but another
142 * CPU might be walking that list. We need to make sure
143 * the ops->next pointer is valid before another CPU sees
144 * the ops pointer included in the ftrace_list.
145 */
146 smp_wmb();
147 ftrace_list = ops;
148
149 if (ftrace_enabled) {
150 ftrace_func_t func;
151
152 if (ops->next == &ftrace_list_end)
153 func = ops->func;
154 else
155 func = ftrace_list_func;
156
157 if (ftrace_pid_trace) {
158 set_ftrace_pid_function(func);
159 func = ftrace_pid_func;
160 }
161
162 /*
163 * For one func, simply call it directly.
164 * For more than one func, call the chain.
165 */
166 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
167 ftrace_trace_function = func;
168 #else
169 __ftrace_trace_function = func;
170 ftrace_trace_function = ftrace_test_stop_func;
171 #endif
172 }
173
174 return 0;
175 }
176
177 static int __unregister_ftrace_function(struct ftrace_ops *ops)
178 {
179 struct ftrace_ops **p;
180
181 /*
182 * If we are removing the last function, then simply point
183 * to the ftrace_stub.
184 */
185 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
186 ftrace_trace_function = ftrace_stub;
187 ftrace_list = &ftrace_list_end;
188 return 0;
189 }
190
191 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
192 if (*p == ops)
193 break;
194
195 if (*p != ops)
196 return -1;
197
198 *p = (*p)->next;
199
200 if (ftrace_enabled) {
201 /* If we only have one func left, then call that directly */
202 if (ftrace_list->next == &ftrace_list_end) {
203 ftrace_func_t func = ftrace_list->func;
204
205 if (ftrace_pid_trace) {
206 set_ftrace_pid_function(func);
207 func = ftrace_pid_func;
208 }
209 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
210 ftrace_trace_function = func;
211 #else
212 __ftrace_trace_function = func;
213 #endif
214 }
215 }
216
217 return 0;
218 }
219
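
/*
 * A minimal sketch (not from the original file) of how a client uses
 * the public register_ftrace_function()/unregister_ftrace_function()
 * wrappers around the helpers above; the my_* names are invented:
 */
#if 0
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* called with the traced function's address and its call site */
}

static struct ftrace_ops my_trace_ops __read_mostly = {
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_trace_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_trace_ops);
}
#endif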
220 static void ftrace_update_pid_func(void)
221 {
222 ftrace_func_t func;
223
224 if (ftrace_trace_function == ftrace_stub)
225 return;
226
227 func = ftrace_trace_function;
228
229 if (ftrace_pid_trace) {
230 set_ftrace_pid_function(func);
231 func = ftrace_pid_func;
232 } else {
233 if (func == ftrace_pid_func)
234 func = ftrace_pid_function;
235 }
236
237 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
238 ftrace_trace_function = func;
239 #else
240 __ftrace_trace_function = func;
241 #endif
242 }
243
244 #ifdef CONFIG_FUNCTION_PROFILER
245 struct ftrace_profile {
246 struct hlist_node node;
247 unsigned long ip;
248 unsigned long counter;
249 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
250 unsigned long long time;
251 #endif
252 };
253
254 struct ftrace_profile_page {
255 struct ftrace_profile_page *next;
256 unsigned long index;
257 struct ftrace_profile records[];
258 };
259
260 struct ftrace_profile_stat {
261 atomic_t disabled;
262 struct hlist_head *hash;
263 struct ftrace_profile_page *pages;
264 struct ftrace_profile_page *start;
265 struct tracer_stat stat;
266 };
267
268 #define PROFILE_RECORDS_SIZE \
269 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
270
271 #define PROFILES_PER_PAGE \
272 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
273
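/*
 * Rough sizing, assuming x86-64 (4096-byte pages, 8-byte longs and
 * pointers); the numbers are illustrative, not from this file:
 *
 *   page header (next + index)                    =   16 bytes
 *   PROFILE_RECORDS_SIZE = 4096 - 16              = 4080 bytes
 *   sizeof(struct ftrace_profile), graph enabled  =   40 bytes
 *   PROFILES_PER_PAGE    = 4080 / 40              =  102 records
 *
 * At the 20000-function estimate used below, that works out to
 * DIV_ROUND_UP(20000, 102) = 197 preallocated pages (~790 KB) per CPU.
 */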
274 static int ftrace_profile_bits __read_mostly;
275 static int ftrace_profile_enabled __read_mostly;
276
277 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
278 static DEFINE_MUTEX(ftrace_profile_lock);
279
280 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
281
282 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be a power of 2 */
283
284 static void *
285 function_stat_next(void *v, int idx)
286 {
287 struct ftrace_profile *rec = v;
288 struct ftrace_profile_page *pg;
289
290 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
291
292 again:
293 rec++;
294 if ((void *)rec >= (void *)&pg->records[pg->index]) {
295 pg = pg->next;
296 if (!pg)
297 return NULL;
298 rec = &pg->records[0];
299 if (!rec->counter)
300 goto again;
301 }
302
303 return rec;
304 }
305
306 static void *function_stat_start(struct tracer_stat *trace)
307 {
308 struct ftrace_profile_stat *stat =
309 container_of(trace, struct ftrace_profile_stat, stat);
310
311 if (!stat || !stat->start)
312 return NULL;
313
314 return function_stat_next(&stat->start->records[0], 0);
315 }
316
317 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
318 /* with function graph tracing, compare on total time */
319 static int function_stat_cmp(void *p1, void *p2)
320 {
321 struct ftrace_profile *a = p1;
322 struct ftrace_profile *b = p2;
323
324 if (a->time < b->time)
325 return -1;
326 if (a->time > b->time)
327 return 1;
328 else
329 return 0;
330 }
331 #else
332 /* without function graph tracing, compare against hit counts */
333 static int function_stat_cmp(void *p1, void *p2)
334 {
335 struct ftrace_profile *a = p1;
336 struct ftrace_profile *b = p2;
337
338 if (a->counter < b->counter)
339 return -1;
340 if (a->counter > b->counter)
341 return 1;
342 else
343 return 0;
344 }
345 #endif
346
347 static int function_stat_headers(struct seq_file *m)
348 {
349 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
350 seq_printf(m, " Function Hit Time\n"
351 " -------- --- ----\n");
352 #else
353 seq_printf(m, " Function Hit\n"
354 " -------- ---\n");
355 #endif
356 return 0;
357 }
358
359 static int function_stat_show(struct seq_file *m, void *v)
360 {
361 struct ftrace_profile *rec = v;
362 char str[KSYM_SYMBOL_LEN];
363 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
364 static struct trace_seq s;
365 static DEFINE_MUTEX(mutex);
366
367 mutex_lock(&mutex);
368 trace_seq_init(&s);
369 trace_print_graph_duration(rec->time, &s);
370 #endif
371
372 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
373 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
374
375 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
376 seq_printf(m, " ");
377 trace_print_seq(m, &s);
378 mutex_unlock(&mutex);
379 #endif
380 seq_putc(m, '\n');
381
382 return 0;
383 }
384
385 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
386 {
387 struct ftrace_profile_page *pg;
388
389 pg = stat->pages = stat->start;
390
391 while (pg) {
392 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
393 pg->index = 0;
394 pg = pg->next;
395 }
396
397 memset(stat->hash, 0,
398 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
399 }
400
401 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
402 {
403 struct ftrace_profile_page *pg;
404 int functions;
405 int pages;
406 int i;
407
408 /* If we already allocated, do nothing */
409 if (stat->pages)
410 return 0;
411
412 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
413 if (!stat->pages)
414 return -ENOMEM;
415
416 #ifdef CONFIG_DYNAMIC_FTRACE
417 functions = ftrace_update_tot_cnt;
418 #else
419 /*
420 * We do not know the number of functions that exist because
421 * dynamic tracing is what counts them. From past experience
422 * we have around 20K functions. That should be more than enough.
423 * It is highly unlikely we will execute every function in
424 * the kernel.
425 */
426 functions = 20000;
427 #endif
428
429 pg = stat->start = stat->pages;
430
431 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
432
433 for (i = 0; i < pages; i++) {
434 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
435 if (!pg->next)
436 goto out_free;
437 pg = pg->next;
438 }
439
440 return 0;
441
442 out_free:
443 pg = stat->start;
444 while (pg) {
445 unsigned long tmp = (unsigned long)pg;
446
447 pg = pg->next;
448 free_page(tmp);
449 }
450
451 free_page((unsigned long)stat->pages);
452 stat->pages = NULL;
453 stat->start = NULL;
454
455 return -ENOMEM;
456 }
457
458 static int ftrace_profile_init_cpu(int cpu)
459 {
460 struct ftrace_profile_stat *stat;
461 int size;
462
463 stat = &per_cpu(ftrace_profile_stats, cpu);
464
465 if (stat->hash) {
466 /* If the profile is already created, simply reset it */
467 ftrace_profile_reset(stat);
468 return 0;
469 }
470
471 /*
472 * We are profiling all functions, but usually only a few thousand
473 * functions are hit. We'll make a hash of 1024 items.
474 */
475 size = FTRACE_PROFILE_HASH_SIZE;
476
477 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
478
479 if (!stat->hash)
480 return -ENOMEM;
481
482 if (!ftrace_profile_bits) {
483 size--;
484
485 for (; size; size >>= 1)
486 ftrace_profile_bits++;
487 }
488
489 /* Preallocate the function profiling pages */
490 if (ftrace_profile_pages_init(stat) < 0) {
491 kfree(stat->hash);
492 stat->hash = NULL;
493 return -ENOMEM;
494 }
495
496 return 0;
497 }
498
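/*
 * Worked example of the bit computation above: with
 * FTRACE_PROFILE_HASH_SIZE = 1024, size-- yields 1023 (0x3ff), which
 * is shifted right ten times before reaching zero, so
 * ftrace_profile_bits becomes 10 -- exactly log2(1024), which is what
 * hash_long() expects as its bits argument.
 */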
499 static int ftrace_profile_init(void)
500 {
501 int cpu;
502 int ret = 0;
503
504 for_each_online_cpu(cpu) {
505 ret = ftrace_profile_init_cpu(cpu);
506 if (ret)
507 break;
508 }
509
510 return ret;
511 }
512
513 /* interrupts must be disabled */
514 static struct ftrace_profile *
515 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
516 {
517 struct ftrace_profile *rec;
518 struct hlist_head *hhd;
519 struct hlist_node *n;
520 unsigned long key;
521
522 key = hash_long(ip, ftrace_profile_bits);
523 hhd = &stat->hash[key];
524
525 if (hlist_empty(hhd))
526 return NULL;
527
528 hlist_for_each_entry_rcu(rec, n, hhd, node) {
529 if (rec->ip == ip)
530 return rec;
531 }
532
533 return NULL;
534 }
535
536 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
537 struct ftrace_profile *rec)
538 {
539 unsigned long key;
540
541 key = hash_long(rec->ip, ftrace_profile_bits);
542 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
543 }
544
545 /*
546 * The memory is already allocated; this simply finds a new record to use.
547 */
548 static struct ftrace_profile *
549 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
550 {
551 struct ftrace_profile *rec = NULL;
552
553 /* prevent recursion (from NMIs) */
554 if (atomic_inc_return(&stat->disabled) != 1)
555 goto out;
556
557 /*
558 * Try to find the function again since an NMI
559 * could have added it
560 */
561 rec = ftrace_find_profiled_func(stat, ip);
562 if (rec)
563 goto out;
564
565 if (stat->pages->index == PROFILES_PER_PAGE) {
566 if (!stat->pages->next)
567 goto out;
568 stat->pages = stat->pages->next;
569 }
570
571 rec = &stat->pages->records[stat->pages->index++];
572 rec->ip = ip;
573 ftrace_add_profile(stat, rec);
574
575 out:
576 atomic_dec(&stat->disabled);
577
578 return rec;
579 }
580
581 static void
582 function_profile_call(unsigned long ip, unsigned long parent_ip)
583 {
584 struct ftrace_profile_stat *stat;
585 struct ftrace_profile *rec;
586 unsigned long flags;
587
588 if (!ftrace_profile_enabled)
589 return;
590
591 local_irq_save(flags);
592
593 stat = &__get_cpu_var(ftrace_profile_stats);
594 if (!stat->hash)
595 goto out;
596
597 rec = ftrace_find_profiled_func(stat, ip);
598 if (!rec) {
599 rec = ftrace_profile_alloc(stat, ip);
600 if (!rec)
601 goto out;
602 }
603
604 rec->counter++;
605 out:
606 local_irq_restore(flags);
607 }
608
609 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
610 static int profile_graph_entry(struct ftrace_graph_ent *trace)
611 {
612 function_profile_call(trace->func, 0);
613 return 1;
614 }
615
616 static void profile_graph_return(struct ftrace_graph_ret *trace)
617 {
618 struct ftrace_profile_stat *stat;
619 unsigned long long calltime;
620 struct ftrace_profile *rec;
621 unsigned long flags;
622
623 local_irq_save(flags);
624 stat = &__get_cpu_var(ftrace_profile_stats);
625 if (!stat->hash)
626 goto out;
627
628 calltime = trace->rettime - trace->calltime;
629
630 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
631 int index;
632
633 index = trace->depth;
634
635 /* Append this call time to the parent time to subtract */
636 if (index)
637 current->ret_stack[index - 1].subtime += calltime;
638
639 if (current->ret_stack[index].subtime < calltime)
640 calltime -= current->ret_stack[index].subtime;
641 else
642 calltime = 0;
643 }
644
645 rec = ftrace_find_profiled_func(stat, trace->func);
646 if (rec)
647 rec->time += calltime;
648
649 out:
650 local_irq_restore(flags);
651 }
652
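/*
 * A worked example of the subtime bookkeeping above (the numbers are
 * invented): parent() runs for 10us total and calls child(), which
 * runs for 4us. When child() returns at depth 1, its 4us calltime is
 * added to ret_stack[0].subtime. When parent() returns at depth 0 and
 * TRACE_ITER_GRAPH_TIME is clear, the accumulated 4us is subtracted,
 * charging parent() 6us of its own time; with the flag set, the full
 * 10us (children included) is recorded instead.
 */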
653 static int register_ftrace_profiler(void)
654 {
655 return register_ftrace_graph(&profile_graph_return,
656 &profile_graph_entry);
657 }
658
659 static void unregister_ftrace_profiler(void)
660 {
661 unregister_ftrace_graph();
662 }
663 #else
664 static struct ftrace_ops ftrace_profile_ops __read_mostly =
665 {
666 .func = function_profile_call,
667 };
668
669 static int register_ftrace_profiler(void)
670 {
671 return register_ftrace_function(&ftrace_profile_ops);
672 }
673
674 static void unregister_ftrace_profiler(void)
675 {
676 unregister_ftrace_function(&ftrace_profile_ops);
677 }
678 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
679
680 static ssize_t
681 ftrace_profile_write(struct file *filp, const char __user *ubuf,
682 size_t cnt, loff_t *ppos)
683 {
684 unsigned long val;
685 char buf[64]; /* big enough to hold a number */
686 int ret;
687
688 if (cnt >= sizeof(buf))
689 return -EINVAL;
690
691 if (copy_from_user(&buf, ubuf, cnt))
692 return -EFAULT;
693
694 buf[cnt] = 0;
695
696 ret = strict_strtoul(buf, 10, &val);
697 if (ret < 0)
698 return ret;
699
700 val = !!val;
701
702 mutex_lock(&ftrace_profile_lock);
703 if (ftrace_profile_enabled ^ val) {
704 if (val) {
705 ret = ftrace_profile_init();
706 if (ret < 0) {
707 cnt = ret;
708 goto out;
709 }
710
711 ret = register_ftrace_profiler();
712 if (ret < 0) {
713 cnt = ret;
714 goto out;
715 }
716 ftrace_profile_enabled = 1;
717 } else {
718 ftrace_profile_enabled = 0;
719 unregister_ftrace_profiler();
720 }
721 }
722 out:
723 mutex_unlock(&ftrace_profile_lock);
724
725 filp->f_pos += cnt;
726
727 return cnt;
728 }
729
730 static ssize_t
731 ftrace_profile_read(struct file *filp, char __user *ubuf,
732 size_t cnt, loff_t *ppos)
733 {
734 char buf[64]; /* big enough to hold a number */
735 int r;
736
737 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
738 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
739 }
740
741 static const struct file_operations ftrace_profile_fops = {
742 .open = tracing_open_generic,
743 .read = ftrace_profile_read,
744 .write = ftrace_profile_write,
745 };
746
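/*
 * Typical usage from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   # echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *   # cat /sys/kernel/debug/tracing/trace_stat/function0
 *   # echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 *
 * The per-cpu "functionN" stat files are registered just below in
 * ftrace_profile_debugfs().
 */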
747 /* used to initialize the real stat files */
748 static struct tracer_stat function_stats __initdata = {
749 .name = "functions",
750 .stat_start = function_stat_start,
751 .stat_next = function_stat_next,
752 .stat_cmp = function_stat_cmp,
753 .stat_headers = function_stat_headers,
754 .stat_show = function_stat_show
755 };
756
757 static void ftrace_profile_debugfs(struct dentry *d_tracer)
758 {
759 struct ftrace_profile_stat *stat;
760 struct dentry *entry;
761 char *name;
762 int ret;
763 int cpu;
764
765 for_each_possible_cpu(cpu) {
766 stat = &per_cpu(ftrace_profile_stats, cpu);
767
768 /* allocate enough for function name + cpu number */
769 name = kmalloc(32, GFP_KERNEL);
770 if (!name) {
771 /*
772 * The files created are permanent; if something happens
773 * we still do not free memory. Note that stat points into
774 * per-cpu storage, so it must never be passed to kfree().
775 */
776 WARN(1,
777 "Could not allocate stat file for cpu %d\n",
778 cpu);
779 return;
780 }
781 stat->stat = function_stats;
782 snprintf(name, 32, "function%d", cpu);
783 stat->stat.name = name;
784 ret = register_stat_tracer(&stat->stat);
785 if (ret) {
786 WARN(1,
787 "Could not register function stat for cpu %d\n",
788 cpu);
789 kfree(name);
790 return;
791 }
792 }
793
794 entry = debugfs_create_file("function_profile_enabled", 0644,
795 d_tracer, NULL, &ftrace_profile_fops);
796 if (!entry)
797 pr_warning("Could not create debugfs "
798 "'function_profile_enabled' entry\n");
799 }
800
801 #else /* CONFIG_FUNCTION_PROFILER */
802 static void ftrace_profile_debugfs(struct dentry *d_tracer)
803 {
804 }
805 #endif /* CONFIG_FUNCTION_PROFILER */
806
807 /* set when tracing only a pid */
808 struct pid *ftrace_pid_trace;
809 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
810
811 #ifdef CONFIG_DYNAMIC_FTRACE
812
813 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
814 # error Dynamic ftrace depends on MCOUNT_RECORD
815 #endif
816
817 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
818
819 struct ftrace_func_probe {
820 struct hlist_node node;
821 struct ftrace_probe_ops *ops;
822 unsigned long flags;
823 unsigned long ip;
824 void *data;
825 struct rcu_head rcu;
826 };
827
828 enum {
829 FTRACE_ENABLE_CALLS = (1 << 0),
830 FTRACE_DISABLE_CALLS = (1 << 1),
831 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
832 FTRACE_ENABLE_MCOUNT = (1 << 3),
833 FTRACE_DISABLE_MCOUNT = (1 << 4),
834 FTRACE_START_FUNC_RET = (1 << 5),
835 FTRACE_STOP_FUNC_RET = (1 << 6),
836 };
837
838 static int ftrace_filtered;
839
840 static struct dyn_ftrace *ftrace_new_addrs;
841
842 static DEFINE_MUTEX(ftrace_regex_lock);
843
844 struct ftrace_page {
845 struct ftrace_page *next;
846 int index;
847 struct dyn_ftrace records[];
848 };
849
850 #define ENTRIES_PER_PAGE \
851 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
852
853 /* estimate from running different kernels */
854 #define NR_TO_INIT 10000
855
856 static struct ftrace_page *ftrace_pages_start;
857 static struct ftrace_page *ftrace_pages;
858
859 static struct dyn_ftrace *ftrace_free_records;
860
861 /*
862 * This is a double for loop. Do not use 'break' to break out of
863 * it; you must use a goto.
864 */
865 #define do_for_each_ftrace_rec(pg, rec) \
866 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
867 int _____i; \
868 for (_____i = 0; _____i < pg->index; _____i++) { \
869 rec = &pg->records[_____i];
870
871 #define while_for_each_ftrace_rec() \
872 } \
873 }
874
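/*
 * A minimal usage sketch for the macro pair above (target_ip is an
 * invented placeholder). Because the expansion is two nested for
 * loops, exit early with a goto, never with 'break':
 */
#if 0
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip == target_ip)
			goto found;	/* 'break' would only leave the inner loop */
	} while_for_each_ftrace_rec();
 found:
	mutex_unlock(&ftrace_lock);
#endif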
875 #ifdef CONFIG_KPROBES
876
877 static int frozen_record_count;
878
879 static inline void freeze_record(struct dyn_ftrace *rec)
880 {
881 if (!(rec->flags & FTRACE_FL_FROZEN)) {
882 rec->flags |= FTRACE_FL_FROZEN;
883 frozen_record_count++;
884 }
885 }
886
887 static inline void unfreeze_record(struct dyn_ftrace *rec)
888 {
889 if (rec->flags & FTRACE_FL_FROZEN) {
890 rec->flags &= ~FTRACE_FL_FROZEN;
891 frozen_record_count--;
892 }
893 }
894
895 static inline int record_frozen(struct dyn_ftrace *rec)
896 {
897 return rec->flags & FTRACE_FL_FROZEN;
898 }
899 #else
900 # define freeze_record(rec) ({ 0; })
901 # define unfreeze_record(rec) ({ 0; })
902 # define record_frozen(rec) ({ 0; })
903 #endif /* CONFIG_KPROBES */
904
905 static void ftrace_free_rec(struct dyn_ftrace *rec)
906 {
907 rec->freelist = ftrace_free_records;
908 ftrace_free_records = rec;
909 rec->flags |= FTRACE_FL_FREE;
910 }
911
912 void ftrace_release(void *start, unsigned long size)
913 {
914 struct dyn_ftrace *rec;
915 struct ftrace_page *pg;
916 unsigned long s = (unsigned long)start;
917 unsigned long e = s + size;
918
919 if (ftrace_disabled || !start)
920 return;
921
922 mutex_lock(&ftrace_lock);
923 do_for_each_ftrace_rec(pg, rec) {
924 if ((rec->ip >= s) && (rec->ip < e) &&
925 !(rec->flags & FTRACE_FL_FREE))
926 ftrace_free_rec(rec);
927 } while_for_each_ftrace_rec();
928 mutex_unlock(&ftrace_lock);
929 }
930
931 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
932 {
933 struct dyn_ftrace *rec;
934
935 /* First check for freed records */
936 if (ftrace_free_records) {
937 rec = ftrace_free_records;
938
939 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
940 FTRACE_WARN_ON_ONCE(1);
941 ftrace_free_records = NULL;
942 return NULL;
943 }
944
945 ftrace_free_records = rec->freelist;
946 memset(rec, 0, sizeof(*rec));
947 return rec;
948 }
949
950 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
951 if (!ftrace_pages->next) {
952 /* allocate another page */
953 ftrace_pages->next =
954 (void *)get_zeroed_page(GFP_KERNEL);
955 if (!ftrace_pages->next)
956 return NULL;
957 }
958 ftrace_pages = ftrace_pages->next;
959 }
960
961 return &ftrace_pages->records[ftrace_pages->index++];
962 }
963
964 static struct dyn_ftrace *
965 ftrace_record_ip(unsigned long ip)
966 {
967 struct dyn_ftrace *rec;
968
969 if (ftrace_disabled)
970 return NULL;
971
972 rec = ftrace_alloc_dyn_node(ip);
973 if (!rec)
974 return NULL;
975
976 rec->ip = ip;
977 rec->newlist = ftrace_new_addrs;
978 ftrace_new_addrs = rec;
979
980 return rec;
981 }
982
983 static void print_ip_ins(const char *fmt, unsigned char *p)
984 {
985 int i;
986
987 printk(KERN_CONT "%s", fmt);
988
989 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
990 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
991 }
992
993 static void ftrace_bug(int failed, unsigned long ip)
994 {
995 switch (failed) {
996 case -EFAULT:
997 FTRACE_WARN_ON_ONCE(1);
998 pr_info("ftrace faulted on modifying ");
999 print_ip_sym(ip);
1000 break;
1001 case -EINVAL:
1002 FTRACE_WARN_ON_ONCE(1);
1003 pr_info("ftrace failed to modify ");
1004 print_ip_sym(ip);
1005 print_ip_ins(" actual: ", (unsigned char *)ip);
1006 printk(KERN_CONT "\n");
1007 break;
1008 case -EPERM:
1009 FTRACE_WARN_ON_ONCE(1);
1010 pr_info("ftrace faulted on writing ");
1011 print_ip_sym(ip);
1012 break;
1013 default:
1014 FTRACE_WARN_ON_ONCE(1);
1015 pr_info("ftrace faulted on unknown error ");
1016 print_ip_sym(ip);
1017 }
1018 }
1019
1020
1021 static int
1022 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1023 {
1024 unsigned long ftrace_addr;
1025 unsigned long ip, fl;
1026
1027 ftrace_addr = (unsigned long)FTRACE_ADDR;
1028
1029 ip = rec->ip;
1030
1031 /*
1032 * If this record is not to be traced and
1033 * it is not enabled then do nothing.
1034 *
1035 * If this record is not to be traced and
1036 * it is enabled then disable it.
1037 *
1038 */
1039 if (rec->flags & FTRACE_FL_NOTRACE) {
1040 if (rec->flags & FTRACE_FL_ENABLED)
1041 rec->flags &= ~FTRACE_FL_ENABLED;
1042 else
1043 return 0;
1044
1045 } else if (ftrace_filtered && enable) {
1046 /*
1047 * Filtering is on:
1048 */
1049
1050 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
1051
1052 /* Record is filtered and enabled, do nothing */
1053 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
1054 return 0;
1055
1056 /* Record is not filtered or enabled, do nothing */
1057 if (!fl)
1058 return 0;
1059
1060 /* Record is not filtered but enabled, disable it */
1061 if (fl == FTRACE_FL_ENABLED)
1062 rec->flags &= ~FTRACE_FL_ENABLED;
1063 else
1064 /* Otherwise record is filtered but not enabled, enable it */
1065 rec->flags |= FTRACE_FL_ENABLED;
1066 } else {
1067 /* Disable or not filtered */
1068
1069 if (enable) {
1070 /* if record is enabled, do nothing */
1071 if (rec->flags & FTRACE_FL_ENABLED)
1072 return 0;
1073
1074 rec->flags |= FTRACE_FL_ENABLED;
1075
1076 } else {
1077
1078 /* if record is not enabled, do nothing */
1079 if (!(rec->flags & FTRACE_FL_ENABLED))
1080 return 0;
1081
1082 rec->flags &= ~FTRACE_FL_ENABLED;
1083 }
1084 }
1085
1086 if (rec->flags & FTRACE_FL_ENABLED)
1087 return ftrace_make_call(rec, ftrace_addr);
1088 else
1089 return ftrace_make_nop(NULL, rec, ftrace_addr);
1090 }
1091
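/*
 * A recap of the filter branch above as a table (applies when
 * ftrace_filtered is set and enable == 1):
 *
 *   FILTER  ENABLED  action
 *   ------  -------  ------------------------------
 *      1       1     nothing (already being traced)
 *      0       0     nothing (not wanted)
 *      0       1     clear ENABLED -> patch in a nop
 *      1       0     set ENABLED   -> patch in a call
 */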
1092 static void ftrace_replace_code(int enable)
1093 {
1094 struct dyn_ftrace *rec;
1095 struct ftrace_page *pg;
1096 int failed;
1097
1098 do_for_each_ftrace_rec(pg, rec) {
1099 /*
1100 * Skip over free records, records that have failed,
1101 * and records that have not been converted.
1102 */
1103 if (rec->flags & FTRACE_FL_FREE ||
1104 rec->flags & FTRACE_FL_FAILED ||
1105 !(rec->flags & FTRACE_FL_CONVERTED))
1106 continue;
1107
1108 /* ignore updates to this record's mcount site */
1109 if (get_kprobe((void *)rec->ip)) {
1110 freeze_record(rec);
1111 continue;
1112 } else {
1113 unfreeze_record(rec);
1114 }
1115
1116 failed = __ftrace_replace_code(rec, enable);
1117 if (failed) {
1118 rec->flags |= FTRACE_FL_FAILED;
1119 if ((system_state == SYSTEM_BOOTING) ||
1120 !core_kernel_text(rec->ip)) {
1121 ftrace_free_rec(rec);
1122 } else {
1123 ftrace_bug(failed, rec->ip);
1124 /* Stop processing */
1125 return;
1126 }
1127 }
1128 } while_for_each_ftrace_rec();
1129 }
1130
1131 static int
1132 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1133 {
1134 unsigned long ip;
1135 int ret;
1136
1137 ip = rec->ip;
1138
1139 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1140 if (ret) {
1141 ftrace_bug(ret, ip);
1142 rec->flags |= FTRACE_FL_FAILED;
1143 return 0;
1144 }
1145 return 1;
1146 }
1147
1148 /*
1149 * archs can override this function if they must do something
1150 * before the code modification is performed.
1151 */
1152 int __weak ftrace_arch_code_modify_prepare(void)
1153 {
1154 return 0;
1155 }
1156
1157 /*
1158 * archs can override this function if they must do something
1159 * after the code modification is performed.
1160 */
1161 int __weak ftrace_arch_code_modify_post_process(void)
1162 {
1163 return 0;
1164 }
1165
1166 static int __ftrace_modify_code(void *data)
1167 {
1168 int *command = data;
1169
1170 if (*command & FTRACE_ENABLE_CALLS)
1171 ftrace_replace_code(1);
1172 else if (*command & FTRACE_DISABLE_CALLS)
1173 ftrace_replace_code(0);
1174
1175 if (*command & FTRACE_UPDATE_TRACE_FUNC)
1176 ftrace_update_ftrace_func(ftrace_trace_function);
1177
1178 if (*command & FTRACE_START_FUNC_RET)
1179 ftrace_enable_ftrace_graph_caller();
1180 else if (*command & FTRACE_STOP_FUNC_RET)
1181 ftrace_disable_ftrace_graph_caller();
1182
1183 return 0;
1184 }
1185
1186 static void ftrace_run_update_code(int command)
1187 {
1188 int ret;
1189
1190 ret = ftrace_arch_code_modify_prepare();
1191 FTRACE_WARN_ON(ret);
1192 if (ret)
1193 return;
1194
1195 stop_machine(__ftrace_modify_code, &command, NULL);
1196
1197 ret = ftrace_arch_code_modify_post_process();
1198 FTRACE_WARN_ON(ret);
1199 }
1200
1201 static ftrace_func_t saved_ftrace_func;
1202 static int ftrace_start_up;
1203
1204 static void ftrace_startup_enable(int command)
1205 {
1206 if (saved_ftrace_func != ftrace_trace_function) {
1207 saved_ftrace_func = ftrace_trace_function;
1208 command |= FTRACE_UPDATE_TRACE_FUNC;
1209 }
1210
1211 if (!command || !ftrace_enabled)
1212 return;
1213
1214 ftrace_run_update_code(command);
1215 }
1216
1217 static void ftrace_startup(int command)
1218 {
1219 if (unlikely(ftrace_disabled))
1220 return;
1221
1222 ftrace_start_up++;
1223 command |= FTRACE_ENABLE_CALLS;
1224
1225 ftrace_startup_enable(command);
1226 }
1227
1228 static void ftrace_shutdown(int command)
1229 {
1230 if (unlikely(ftrace_disabled))
1231 return;
1232
1233 ftrace_start_up--;
1234 if (!ftrace_start_up)
1235 command |= FTRACE_DISABLE_CALLS;
1236
1237 if (saved_ftrace_func != ftrace_trace_function) {
1238 saved_ftrace_func = ftrace_trace_function;
1239 command |= FTRACE_UPDATE_TRACE_FUNC;
1240 }
1241
1242 if (!command || !ftrace_enabled)
1243 return;
1244
1245 ftrace_run_update_code(command);
1246 }
1247
1248 static void ftrace_startup_sysctl(void)
1249 {
1250 int command = FTRACE_ENABLE_MCOUNT;
1251
1252 if (unlikely(ftrace_disabled))
1253 return;
1254
1255 /* Force update next time */
1256 saved_ftrace_func = NULL;
1257 /* ftrace_start_up is true if we want ftrace running */
1258 if (ftrace_start_up)
1259 command |= FTRACE_ENABLE_CALLS;
1260
1261 ftrace_run_update_code(command);
1262 }
1263
1264 static void ftrace_shutdown_sysctl(void)
1265 {
1266 int command = FTRACE_DISABLE_MCOUNT;
1267
1268 if (unlikely(ftrace_disabled))
1269 return;
1270
1271 /* ftrace_start_up is true if ftrace is running */
1272 if (ftrace_start_up)
1273 command |= FTRACE_DISABLE_CALLS;
1274
1275 ftrace_run_update_code(command);
1276 }
1277
1278 static cycle_t ftrace_update_time;
1279 static unsigned long ftrace_update_cnt;
1280 unsigned long ftrace_update_tot_cnt;
1281
1282 static int ftrace_update_code(struct module *mod)
1283 {
1284 struct dyn_ftrace *p;
1285 cycle_t start, stop;
1286
1287 start = ftrace_now(raw_smp_processor_id());
1288 ftrace_update_cnt = 0;
1289
1290 while (ftrace_new_addrs) {
1291
1292 /* If something went wrong, bail without enabling anything */
1293 if (unlikely(ftrace_disabled))
1294 return -1;
1295
1296 p = ftrace_new_addrs;
1297 ftrace_new_addrs = p->newlist;
1298 p->flags = 0L;
1299
1300 /* convert record (i.e., patch mcount-call with NOP) */
1301 if (ftrace_code_disable(mod, p)) {
1302 p->flags |= FTRACE_FL_CONVERTED;
1303 ftrace_update_cnt++;
1304 } else
1305 ftrace_free_rec(p);
1306 }
1307
1308 stop = ftrace_now(raw_smp_processor_id());
1309 ftrace_update_time = stop - start;
1310 ftrace_update_tot_cnt += ftrace_update_cnt;
1311
1312 return 0;
1313 }
1314
1315 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1316 {
1317 struct ftrace_page *pg;
1318 int cnt;
1319 int i;
1320
1321 /* allocate a few pages */
1322 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1323 if (!ftrace_pages_start)
1324 return -1;
1325
1326 /*
1327 * Allocate a few more pages.
1328 *
1329 * TODO: have some parser search vmlinux before
1330 * final linking to find all calls to ftrace.
1331 * Then we can:
1332 * a) know how many pages to allocate.
1333 * and/or
1334 * b) set up the table then.
1335 *
1336 * The dynamic code is still necessary for
1337 * modules.
1338 */
1339
1340 pg = ftrace_pages = ftrace_pages_start;
1341
1342 cnt = num_to_init / ENTRIES_PER_PAGE;
1343 pr_info("ftrace: allocating %ld entries in %d pages\n",
1344 num_to_init, cnt + 1);
1345
1346 for (i = 0; i < cnt; i++) {
1347 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1348
1349 /* If we fail, we'll try later anyway */
1350 if (!pg->next)
1351 break;
1352
1353 pg = pg->next;
1354 }
1355
1356 return 0;
1357 }
1358
1359 enum {
1360 FTRACE_ITER_FILTER = (1 << 0),
1361 FTRACE_ITER_CONT = (1 << 1),
1362 FTRACE_ITER_NOTRACE = (1 << 2),
1363 FTRACE_ITER_FAILURES = (1 << 3),
1364 FTRACE_ITER_PRINTALL = (1 << 4),
1365 FTRACE_ITER_HASH = (1 << 5),
1366 };
1367
1368 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1369
1370 struct ftrace_iterator {
1371 struct ftrace_page *pg;
1372 int hidx;
1373 int idx;
1374 unsigned flags;
1375 unsigned char buffer[FTRACE_BUFF_MAX+1];
1376 unsigned buffer_idx;
1377 unsigned filtered;
1378 };
1379
1380 static void *
1381 t_hash_next(struct seq_file *m, void *v, loff_t *pos)
1382 {
1383 struct ftrace_iterator *iter = m->private;
1384 struct hlist_node *hnd = v;
1385 struct hlist_head *hhd;
1386
1387 WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
1388
1389 (*pos)++;
1390
1391 retry:
1392 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1393 return NULL;
1394
1395 hhd = &ftrace_func_hash[iter->hidx];
1396
1397 if (hlist_empty(hhd)) {
1398 iter->hidx++;
1399 hnd = NULL;
1400 goto retry;
1401 }
1402
1403 if (!hnd)
1404 hnd = hhd->first;
1405 else {
1406 hnd = hnd->next;
1407 if (!hnd) {
1408 iter->hidx++;
1409 goto retry;
1410 }
1411 }
1412
1413 return hnd;
1414 }
1415
1416 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1417 {
1418 struct ftrace_iterator *iter = m->private;
1419 void *p = NULL;
1420
1421 iter->flags |= FTRACE_ITER_HASH;
1422
1423 return t_hash_next(m, p, pos);
1424 }
1425
1426 static int t_hash_show(struct seq_file *m, void *v)
1427 {
1428 struct ftrace_func_probe *rec;
1429 struct hlist_node *hnd = v;
1430 char str[KSYM_SYMBOL_LEN];
1431
1432 rec = hlist_entry(hnd, struct ftrace_func_probe, node);
1433
1434 if (rec->ops->print)
1435 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1436
1437 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1438 seq_printf(m, "%s:", str);
1439
1440 kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
1441 seq_printf(m, "%s", str);
1442
1443 if (rec->data)
1444 seq_printf(m, ":%p", rec->data);
1445 seq_putc(m, '\n');
1446
1447 return 0;
1448 }
1449
1450 static void *
1451 t_next(struct seq_file *m, void *v, loff_t *pos)
1452 {
1453 struct ftrace_iterator *iter = m->private;
1454 struct dyn_ftrace *rec = NULL;
1455
1456 if (iter->flags & FTRACE_ITER_HASH)
1457 return t_hash_next(m, v, pos);
1458
1459 (*pos)++;
1460
1461 if (iter->flags & FTRACE_ITER_PRINTALL)
1462 return NULL;
1463
1464 retry:
1465 if (iter->idx >= iter->pg->index) {
1466 if (iter->pg->next) {
1467 iter->pg = iter->pg->next;
1468 iter->idx = 0;
1469 goto retry;
1470 } else {
1471 iter->idx = -1;
1472 }
1473 } else {
1474 rec = &iter->pg->records[iter->idx++];
1475 if ((rec->flags & FTRACE_FL_FREE) ||
1476
1477 (!(iter->flags & FTRACE_ITER_FAILURES) &&
1478 (rec->flags & FTRACE_FL_FAILED)) ||
1479
1480 ((iter->flags & FTRACE_ITER_FAILURES) &&
1481 !(rec->flags & FTRACE_FL_FAILED)) ||
1482
1483 ((iter->flags & FTRACE_ITER_FILTER) &&
1484 !(rec->flags & FTRACE_FL_FILTER)) ||
1485
1486 ((iter->flags & FTRACE_ITER_NOTRACE) &&
1487 !(rec->flags & FTRACE_FL_NOTRACE))) {
1488 rec = NULL;
1489 goto retry;
1490 }
1491 }
1492
1493 return rec;
1494 }
1495
1496 static void *t_start(struct seq_file *m, loff_t *pos)
1497 {
1498 struct ftrace_iterator *iter = m->private;
1499 void *p = NULL;
1500
1501 mutex_lock(&ftrace_lock);
1502 /*
1503 * For set_ftrace_filter reading, if we have the filter
1504 * off, we can shortcut and just print out that all
1505 * functions are enabled.
1506 */
1507 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
1508 if (*pos > 0)
1509 return t_hash_start(m, pos);
1510 iter->flags |= FTRACE_ITER_PRINTALL;
1511 (*pos)++;
1512 return iter;
1513 }
1514
1515 if (iter->flags & FTRACE_ITER_HASH)
1516 return t_hash_start(m, pos);
1517
1518 if (*pos > 0) {
1519 if (iter->idx < 0)
1520 return p;
1521 (*pos)--;
1522 iter->idx--;
1523 }
1524
1525 p = t_next(m, p, pos);
1526
1527 if (!p)
1528 return t_hash_start(m, pos);
1529
1530 return p;
1531 }
1532
1533 static void t_stop(struct seq_file *m, void *p)
1534 {
1535 mutex_unlock(&ftrace_lock);
1536 }
1537
1538 static int t_show(struct seq_file *m, void *v)
1539 {
1540 struct ftrace_iterator *iter = m->private;
1541 struct dyn_ftrace *rec = v;
1542 char str[KSYM_SYMBOL_LEN];
1543
1544 if (iter->flags & FTRACE_ITER_HASH)
1545 return t_hash_show(m, v);
1546
1547 if (iter->flags & FTRACE_ITER_PRINTALL) {
1548 seq_printf(m, "#### all functions enabled ####\n");
1549 return 0;
1550 }
1551
1552 if (!rec)
1553 return 0;
1554
1555 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1556
1557 seq_printf(m, "%s\n", str);
1558
1559 return 0;
1560 }
1561
1562 static struct seq_operations show_ftrace_seq_ops = {
1563 .start = t_start,
1564 .next = t_next,
1565 .stop = t_stop,
1566 .show = t_show,
1567 };
1568
1569 static int
1570 ftrace_avail_open(struct inode *inode, struct file *file)
1571 {
1572 struct ftrace_iterator *iter;
1573 int ret;
1574
1575 if (unlikely(ftrace_disabled))
1576 return -ENODEV;
1577
1578 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1579 if (!iter)
1580 return -ENOMEM;
1581
1582 iter->pg = ftrace_pages_start;
1583
1584 ret = seq_open(file, &show_ftrace_seq_ops);
1585 if (!ret) {
1586 struct seq_file *m = file->private_data;
1587
1588 m->private = iter;
1589 } else {
1590 kfree(iter);
1591 }
1592
1593 return ret;
1594 }
1595
1596 int ftrace_avail_release(struct inode *inode, struct file *file)
1597 {
1598 struct seq_file *m = (struct seq_file *)file->private_data;
1599 struct ftrace_iterator *iter = m->private;
1600
1601 seq_release(inode, file);
1602 kfree(iter);
1603
1604 return 0;
1605 }
1606
1607 static int
1608 ftrace_failures_open(struct inode *inode, struct file *file)
1609 {
1610 int ret;
1611 struct seq_file *m;
1612 struct ftrace_iterator *iter;
1613
1614 ret = ftrace_avail_open(inode, file);
1615 if (!ret) {
1616 m = (struct seq_file *)file->private_data;
1617 iter = (struct ftrace_iterator *)m->private;
1618 iter->flags = FTRACE_ITER_FAILURES;
1619 }
1620
1621 return ret;
1622 }
1623
1624
1625 static void ftrace_filter_reset(int enable)
1626 {
1627 struct ftrace_page *pg;
1628 struct dyn_ftrace *rec;
1629 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1630
1631 mutex_lock(&ftrace_lock);
1632 if (enable)
1633 ftrace_filtered = 0;
1634 do_for_each_ftrace_rec(pg, rec) {
1635 if (rec->flags & FTRACE_FL_FAILED)
1636 continue;
1637 rec->flags &= ~type;
1638 } while_for_each_ftrace_rec();
1639 mutex_unlock(&ftrace_lock);
1640 }
1641
1642 static int
1643 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1644 {
1645 struct ftrace_iterator *iter;
1646 int ret = 0;
1647
1648 if (unlikely(ftrace_disabled))
1649 return -ENODEV;
1650
1651 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1652 if (!iter)
1653 return -ENOMEM;
1654
1655 mutex_lock(&ftrace_regex_lock);
1656 if ((file->f_mode & FMODE_WRITE) &&
1657 !(file->f_flags & O_APPEND))
1658 ftrace_filter_reset(enable);
1659
1660 if (file->f_mode & FMODE_READ) {
1661 iter->pg = ftrace_pages_start;
1662 iter->flags = enable ? FTRACE_ITER_FILTER :
1663 FTRACE_ITER_NOTRACE;
1664
1665 ret = seq_open(file, &show_ftrace_seq_ops);
1666 if (!ret) {
1667 struct seq_file *m = file->private_data;
1668 m->private = iter;
1669 } else
1670 kfree(iter);
1671 } else
1672 file->private_data = iter;
1673 mutex_unlock(&ftrace_regex_lock);
1674
1675 return ret;
1676 }
1677
1678 static int
1679 ftrace_filter_open(struct inode *inode, struct file *file)
1680 {
1681 return ftrace_regex_open(inode, file, 1);
1682 }
1683
1684 static int
1685 ftrace_notrace_open(struct inode *inode, struct file *file)
1686 {
1687 return ftrace_regex_open(inode, file, 0);
1688 }
1689
1690 static loff_t
1691 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1692 {
1693 loff_t ret;
1694
1695 if (file->f_mode & FMODE_READ)
1696 ret = seq_lseek(file, offset, origin);
1697 else
1698 file->f_pos = ret = 1;
1699
1700 return ret;
1701 }
1702
1703 enum {
1704 MATCH_FULL,
1705 MATCH_FRONT_ONLY,
1706 MATCH_MIDDLE_ONLY,
1707 MATCH_END_ONLY,
1708 };
1709
1710 /*
1711 * (static function - no need for kernel doc)
1712 *
1713 * Pass in a buffer containing a glob and this function will
1714 * set search to point to the search part of the buffer and
1715 * return the type of search it is (see enum above).
1716 * This does modify buff.
1717 *
1718 * Returns enum type.
1719 * search returns the pointer to use for comparison.
1720 * not returns 1 if buff started with a '!'
1721 * 0 otherwise.
1722 */
1723 static int
1724 ftrace_setup_glob(char *buff, int len, char **search, int *not)
1725 {
1726 int type = MATCH_FULL;
1727 int i;
1728
1729 if (buff[0] == '!') {
1730 *not = 1;
1731 buff++;
1732 len--;
1733 } else
1734 *not = 0;
1735
1736 *search = buff;
1737
1738 for (i = 0; i < len; i++) {
1739 if (buff[i] == '*') {
1740 if (!i) {
1741 *search = buff + 1;
1742 type = MATCH_END_ONLY;
1743 } else {
1744 if (type == MATCH_END_ONLY)
1745 type = MATCH_MIDDLE_ONLY;
1746 else
1747 type = MATCH_FRONT_ONLY;
1748 buff[i] = 0;
1749 break;
1750 }
1751 }
1752 }
1753
1754 return type;
1755 }
1756
1757 static int ftrace_match(char *str, char *regex, int len, int type)
1758 {
1759 int matched = 0;
1760 char *ptr;
1761
1762 switch (type) {
1763 case MATCH_FULL:
1764 if (strcmp(str, regex) == 0)
1765 matched = 1;
1766 break;
1767 case MATCH_FRONT_ONLY:
1768 if (strncmp(str, regex, len) == 0)
1769 matched = 1;
1770 break;
1771 case MATCH_MIDDLE_ONLY:
1772 if (strstr(str, regex))
1773 matched = 1;
1774 break;
1775 case MATCH_END_ONLY:
1776 ptr = strstr(str, regex);
1777 if (ptr && (ptr[len] == 0))
1778 matched = 1;
1779 break;
1780 }
1781
1782 return matched;
1783 }
1784
1785 static int
1786 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1787 {
1788 char str[KSYM_SYMBOL_LEN];
1789
1790 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1791 return ftrace_match(str, regex, len, type);
1792 }
1793
1794 static void ftrace_match_records(char *buff, int len, int enable)
1795 {
1796 unsigned int search_len;
1797 struct ftrace_page *pg;
1798 struct dyn_ftrace *rec;
1799 unsigned long flag;
1800 char *search;
1801 int type;
1802 int not;
1803
1804 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1805 type = ftrace_setup_glob(buff, len, &search, &not);
1806
1807 search_len = strlen(search);
1808
1809 mutex_lock(&ftrace_lock);
1810 do_for_each_ftrace_rec(pg, rec) {
1811
1812 if (rec->flags & FTRACE_FL_FAILED)
1813 continue;
1814
1815 if (ftrace_match_record(rec, search, search_len, type)) {
1816 if (not)
1817 rec->flags &= ~flag;
1818 else
1819 rec->flags |= flag;
1820 }
1821 /*
1822 * Only enable filtering if we have a function that
1823 * is filtered on.
1824 */
1825 if (enable && (rec->flags & FTRACE_FL_FILTER))
1826 ftrace_filtered = 1;
1827 } while_for_each_ftrace_rec();
1828 mutex_unlock(&ftrace_lock);
1829 }
1830
1831 static int
1832 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1833 char *regex, int len, int type)
1834 {
1835 char str[KSYM_SYMBOL_LEN];
1836 char *modname;
1837
1838 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1839
1840 if (!modname || strcmp(modname, mod))
1841 return 0;
1842
1843 /* blank search means to match all funcs in the mod */
1844 if (len)
1845 return ftrace_match(str, regex, len, type);
1846 else
1847 return 1;
1848 }
1849
1850 static void ftrace_match_module_records(char *buff, char *mod, int enable)
1851 {
1852 unsigned search_len = 0;
1853 struct ftrace_page *pg;
1854 struct dyn_ftrace *rec;
1855 int type = MATCH_FULL;
1856 char *search = buff;
1857 unsigned long flag;
1858 int not = 0;
1859
1860 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1861
1862 /* blank or '*' mean the same */
1863 if (strcmp(buff, "*") == 0)
1864 buff[0] = 0;
1865
1866 /* handle the case of "don't filter this module" */
1867 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1868 buff[0] = 0;
1869 not = 1;
1870 }
1871
1872 if (strlen(buff)) {
1873 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1874 search_len = strlen(search);
1875 }
1876
1877 mutex_lock(&ftrace_lock);
1878 do_for_each_ftrace_rec(pg, rec) {
1879
1880 if (rec->flags & FTRACE_FL_FAILED)
1881 continue;
1882
1883 if (ftrace_match_module_record(rec, mod,
1884 search, search_len, type)) {
1885 if (not)
1886 rec->flags &= ~flag;
1887 else
1888 rec->flags |= flag;
1889 }
1890 if (enable && (rec->flags & FTRACE_FL_FILTER))
1891 ftrace_filtered = 1;
1892
1893 } while_for_each_ftrace_rec();
1894 mutex_unlock(&ftrace_lock);
1895 }
1896
1897 /*
1898 * We register the module command as a template to show others how
1899 * to register a command as well.
1900 */
1901
1902 static int
1903 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1904 {
1905 char *mod;
1906
1907 /*
1908 * cmd == 'mod' because we only registered this func
1909 * for the 'mod' ftrace_func_command.
1910 * But if you register one func with multiple commands,
1911 * you can tell which command was used by the cmd
1912 * parameter.
1913 */
1914
1915 /* we must have a module name */
1916 if (!param)
1917 return -EINVAL;
1918
1919 mod = strsep(&param, ":");
1920 if (!strlen(mod))
1921 return -EINVAL;
1922
1923 ftrace_match_module_records(func, mod, enable);
1924 return 0;
1925 }
1926
1927 static struct ftrace_func_command ftrace_mod_cmd = {
1928 .name = "mod",
1929 .func = ftrace_mod_callback,
1930 };
1931
1932 static int __init ftrace_mod_cmd_init(void)
1933 {
1934 return register_ftrace_command(&ftrace_mod_cmd);
1935 }
1936 device_initcall(ftrace_mod_cmd_init);
1937
1938 static void
1939 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1940 {
1941 struct ftrace_func_probe *entry;
1942 struct hlist_head *hhd;
1943 struct hlist_node *n;
1944 unsigned long key;
1945 int resched;
1946
1947 key = hash_long(ip, FTRACE_HASH_BITS);
1948
1949 hhd = &ftrace_func_hash[key];
1950
1951 if (hlist_empty(hhd))
1952 return;
1953
1954 /*
1955 * Disable preemption for these calls to prevent an RCU grace
1956 * period from completing. This syncs the hash iteration and
1957 * freeing of items on the hash. rcu_read_lock is too dangerous here.
1958 */
1959 resched = ftrace_preempt_disable();
1960 hlist_for_each_entry_rcu(entry, n, hhd, node) {
1961 if (entry->ip == ip)
1962 entry->ops->func(ip, parent_ip, &entry->data);
1963 }
1964 ftrace_preempt_enable(resched);
1965 }
1966
1967 static struct ftrace_ops trace_probe_ops __read_mostly =
1968 {
1969 .func = function_trace_probe_call,
1970 };
1971
1972 static int ftrace_probe_registered;
1973
1974 static void __enable_ftrace_function_probe(void)
1975 {
1976 int i;
1977
1978 if (ftrace_probe_registered)
1979 return;
1980
1981 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1982 struct hlist_head *hhd = &ftrace_func_hash[i];
1983 if (hhd->first)
1984 break;
1985 }
1986 /* Nothing registered? */
1987 if (i == FTRACE_FUNC_HASHSIZE)
1988 return;
1989
1990 __register_ftrace_function(&trace_probe_ops);
1991 ftrace_startup(0);
1992 ftrace_probe_registered = 1;
1993 }
1994
1995 static void __disable_ftrace_function_probe(void)
1996 {
1997 int i;
1998
1999 if (!ftrace_probe_registered)
2000 return;
2001
2002 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2003 struct hlist_head *hhd = &ftrace_func_hash[i];
2004 if (hhd->first)
2005 return;
2006 }
2007
2008 /* no more funcs left */
2009 __unregister_ftrace_function(&trace_probe_ops);
2010 ftrace_shutdown(0);
2011 ftrace_probe_registered = 0;
2012 }
2013
2014
2015 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2016 {
2017 struct ftrace_func_probe *entry =
2018 container_of(rhp, struct ftrace_func_probe, rcu);
2019
2020 if (entry->ops->free)
2021 entry->ops->free(&entry->data);
2022 kfree(entry);
2023 }
2024
2025
2026 int
2027 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2028 void *data)
2029 {
2030 struct ftrace_func_probe *entry;
2031 struct ftrace_page *pg;
2032 struct dyn_ftrace *rec;
2033 int type, len, not;
2034 unsigned long key;
2035 int count = 0;
2036 char *search;
2037
2038 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
2039 len = strlen(search);
2040
2041 /* we do not support '!' for function probes */
2042 if (WARN_ON(not))
2043 return -EINVAL;
2044
2045 mutex_lock(&ftrace_lock);
2046 do_for_each_ftrace_rec(pg, rec) {
2047
2048 if (rec->flags & FTRACE_FL_FAILED)
2049 continue;
2050
2051 if (!ftrace_match_record(rec, search, len, type))
2052 continue;
2053
2054 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2055 if (!entry) {
2056 /* If we did not process any, then return error */
2057 if (!count)
2058 count = -ENOMEM;
2059 goto out_unlock;
2060 }
2061
2062 count++;
2063
2064 entry->data = data;
2065
2066 /*
2067 * The caller might want to do something special
2068 * for each function we find. We call the callback
2069 * to give the caller an opportunity to do so.
2070 */
2071 if (ops->callback) {
2072 if (ops->callback(rec->ip, &entry->data) < 0) {
2073 /* caller does not like this func */
2074 kfree(entry);
2075 continue;
2076 }
2077 }
2078
2079 entry->ops = ops;
2080 entry->ip = rec->ip;
2081
2082 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2083 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2084
2085 } while_for_each_ftrace_rec();
2086 __enable_ftrace_function_probe();
2087
2088 out_unlock:
2089 mutex_unlock(&ftrace_lock);
2090
2091 return count;
2092 }
2093
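/*
 * A minimal sketch of attaching a probe through the API above (the
 * my_* names and the "vfs_*" glob are invented for illustration):
 */
#if 0
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* runs on entry of every function matched by the glob */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_probe_func,
};

static int __init my_probe_init(void)
{
	int ret;

	/* probe every function whose name starts with "vfs_" */
	ret = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
	return ret < 0 ? ret : 0;	/* a positive return is the match count */
}
#endif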
2094 enum {
2095 PROBE_TEST_FUNC = 1,
2096 PROBE_TEST_DATA = 2
2097 };
2098
2099 static void
2100 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2101 void *data, int flags)
2102 {
2103 struct ftrace_func_probe *entry;
2104 struct hlist_node *n, *tmp;
2105 char str[KSYM_SYMBOL_LEN];
2106 int type = MATCH_FULL;
2107 int i, len = 0;
2108 char *search;
2109
2110 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2111 glob = NULL;
2112 else {
2113 int not;
2114
2115 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
2116 len = strlen(search);
2117
2118 /* we do not support '!' for function probes */
2119 if (WARN_ON(not))
2120 return;
2121 }
2122
2123 mutex_lock(&ftrace_lock);
2124 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2125 struct hlist_head *hhd = &ftrace_func_hash[i];
2126
2127 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2128
2129 /* break up if statements for readability */
2130 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2131 continue;
2132
2133 if ((flags & PROBE_TEST_DATA) && entry->data != data)
2134 continue;
2135
2136 /* do this last, since it is the most expensive */
2137 if (glob) {
2138 kallsyms_lookup(entry->ip, NULL, NULL,
2139 NULL, str);
2140 if (!ftrace_match(str, glob, len, type))
2141 continue;
2142 }
2143
2144 hlist_del(&entry->node);
2145 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2146 }
2147 }
2148 __disable_ftrace_function_probe();
2149 mutex_unlock(&ftrace_lock);
2150 }
2151
2152 void
2153 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2154 void *data)
2155 {
2156 __unregister_ftrace_function_probe(glob, ops, data,
2157 PROBE_TEST_FUNC | PROBE_TEST_DATA);
2158 }
2159
2160 void
2161 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2162 {
2163 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2164 }
2165
2166 void unregister_ftrace_function_probe_all(char *glob)
2167 {
2168 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2169 }
2170
2171 static LIST_HEAD(ftrace_commands);
2172 static DEFINE_MUTEX(ftrace_cmd_mutex);
2173
2174 int register_ftrace_command(struct ftrace_func_command *cmd)
2175 {
2176 struct ftrace_func_command *p;
2177 int ret = 0;
2178
2179 mutex_lock(&ftrace_cmd_mutex);
2180 list_for_each_entry(p, &ftrace_commands, list) {
2181 if (strcmp(cmd->name, p->name) == 0) {
2182 ret = -EBUSY;
2183 goto out_unlock;
2184 }
2185 }
2186 list_add(&cmd->list, &ftrace_commands);
2187 out_unlock:
2188 mutex_unlock(&ftrace_cmd_mutex);
2189
2190 return ret;
2191 }
2192
2193 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2194 {
2195 struct ftrace_func_command *p, *n;
2196 int ret = -ENODEV;
2197
2198 mutex_lock(&ftrace_cmd_mutex);
2199 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2200 if (strcmp(cmd->name, p->name) == 0) {
2201 ret = 0;
2202 list_del_init(&p->list);
2203 goto out_unlock;
2204 }
2205 }
2206 out_unlock:
2207 mutex_unlock(&ftrace_cmd_mutex);
2208
2209 return ret;
2210 }
2211
2212 static int ftrace_process_regex(char *buff, int len, int enable)
2213 {
2214 char *func, *command, *next = buff;
2215 struct ftrace_func_command *p;
2216 int ret = -EINVAL;
2217
2218 func = strsep(&next, ":");
2219
2220 if (!next) {
2221 ftrace_match_records(func, len, enable);
2222 return 0;
2223 }
2224
2225 /* command found */
2226
2227 command = strsep(&next, ":");
2228
2229 mutex_lock(&ftrace_cmd_mutex);
2230 list_for_each_entry(p, &ftrace_commands, list) {
2231 if (strcmp(p->name, command) == 0) {
2232 ret = p->func(func, command, next, enable);
2233 goto out_unlock;
2234 }
2235 }
2236 out_unlock:
2237 mutex_unlock(&ftrace_cmd_mutex);
2238
2239 return ret;
2240 }
2241
2242 static ssize_t
2243 ftrace_regex_write(struct file *file, const char __user *ubuf,
2244 size_t cnt, loff_t *ppos, int enable)
2245 {
2246 struct ftrace_iterator *iter;
2247 char ch;
2248 size_t read = 0;
2249 ssize_t ret;
2250
2251 if (!cnt)
2252 return 0;
2253
2254 mutex_lock(&ftrace_regex_lock);
2255
2256 if (file->f_mode & FMODE_READ) {
2257 struct seq_file *m = file->private_data;
2258 iter = m->private;
2259 } else
2260 iter = file->private_data;
2261
2262 if (!*ppos) {
2263 iter->flags &= ~FTRACE_ITER_CONT;
2264 iter->buffer_idx = 0;
2265 }
2266
2267 ret = get_user(ch, ubuf++);
2268 if (ret)
2269 goto out;
2270 read++;
2271 cnt--;
2272
2273 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
2274 /* skip white space */
2275 while (cnt && isspace(ch)) {
2276 ret = get_user(ch, ubuf++);
2277 if (ret)
2278 goto out;
2279 read++;
2280 cnt--;
2281 }
2282
2283 if (isspace(ch)) {
2284 file->f_pos += read;
2285 ret = read;
2286 goto out;
2287 }
2288
2289 iter->buffer_idx = 0;
2290 }
2291
2292 while (cnt && !isspace(ch)) {
2293 if (iter->buffer_idx < FTRACE_BUFF_MAX)
2294 iter->buffer[iter->buffer_idx++] = ch;
2295 else {
2296 ret = -EINVAL;
2297 goto out;
2298 }
2299 ret = get_user(ch, ubuf++);
2300 if (ret)
2301 goto out;
2302 read++;
2303 cnt--;
2304 }
2305
2306 if (isspace(ch)) {
2307 iter->filtered++;
2308 iter->buffer[iter->buffer_idx] = 0;
2309 ret = ftrace_process_regex(iter->buffer,
2310 iter->buffer_idx, enable);
2311 if (ret)
2312 goto out;
2313 iter->buffer_idx = 0;
2314 } else
2315 iter->flags |= FTRACE_ITER_CONT;
2316
2317
2318 file->f_pos += read;
2319
2320 ret = read;
2321 out:
2322 mutex_unlock(&ftrace_regex_lock);
2323
2324 return ret;
2325 }
2326
2327 static ssize_t
2328 ftrace_filter_write(struct file *file, const char __user *ubuf,
2329 size_t cnt, loff_t *ppos)
2330 {
2331 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2332 }
2333
2334 static ssize_t
2335 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2336 size_t cnt, loff_t *ppos)
2337 {
2338 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2339 }
2340
2341 static void
2342 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2343 {
2344 if (unlikely(ftrace_disabled))
2345 return;
2346
2347 mutex_lock(&ftrace_regex_lock);
2348 if (reset)
2349 ftrace_filter_reset(enable);
2350 if (buf)
2351 ftrace_match_records(buf, len, enable);
2352 mutex_unlock(&ftrace_regex_lock);
2353 }
2354
2355 /**
2356 * ftrace_set_filter - set a function to filter on in ftrace
2357 * @buf - the string that holds the function filter text.
2358 * @len - the length of the string.
2359 * @reset - non zero to reset all filters before applying this filter.
2360 *
2361 * Filters denote which functions should be enabled when tracing is enabled.
2362 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2363 */
2364 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2365 {
2366 ftrace_set_regex(buf, len, reset, 1);
2367 }
2368
2369 /**
2370 * ftrace_set_notrace - set a function to not trace in ftrace
2371 * @buf - the string that holds the function notrace text.
2372 * @len - the length of the string.
2373 * @reset - non zero to reset all filters before applying this filter.
2374 *
2375 * Notrace Filters denote which functions should not be enabled when tracing
2376 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2377 * for tracing.
2378 */
2379 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2380 {
2381 ftrace_set_regex(buf, len, reset, 0);
2382 }
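
/*
 * Illustrative in-kernel use of the two setters above (the globs are
 * arbitrary examples): reset the filter to the scheduler entry point,
 * and additionally never trace timer helpers:
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *	ftrace_set_notrace("*timer*", strlen("*timer*"), 1);
 */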
2383
2384 static int
2385 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
2386 {
2387 struct seq_file *m = (struct seq_file *)file->private_data;
2388 struct ftrace_iterator *iter;
2389
2390 mutex_lock(&ftrace_regex_lock);
2391 if (file->f_mode & FMODE_READ) {
2392 iter = m->private;
2393
2394 seq_release(inode, file);
2395 } else
2396 iter = file->private_data;
2397
2398 if (iter->buffer_idx) {
2399 iter->filtered++;
2400 iter->buffer[iter->buffer_idx] = 0;
2401 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
2402 }
2403
2404 mutex_lock(&ftrace_lock);
2405 if (ftrace_start_up && ftrace_enabled)
2406 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2407 mutex_unlock(&ftrace_lock);
2408
2409 kfree(iter);
2410 mutex_unlock(&ftrace_regex_lock);
2411 return 0;
2412 }
2413
2414 static int
2415 ftrace_filter_release(struct inode *inode, struct file *file)
2416 {
2417 return ftrace_regex_release(inode, file, 1);
2418 }
2419
2420 static int
2421 ftrace_notrace_release(struct inode *inode, struct file *file)
2422 {
2423 return ftrace_regex_release(inode, file, 0);
2424 }
2425
2426 static const struct file_operations ftrace_avail_fops = {
2427 .open = ftrace_avail_open,
2428 .read = seq_read,
2429 .llseek = seq_lseek,
2430 .release = ftrace_avail_release,
2431 };
2432
2433 static const struct file_operations ftrace_failures_fops = {
2434 .open = ftrace_failures_open,
2435 .read = seq_read,
2436 .llseek = seq_lseek,
2437 .release = ftrace_avail_release,
2438 };
2439
2440 static const struct file_operations ftrace_filter_fops = {
2441 .open = ftrace_filter_open,
2442 .read = seq_read,
2443 .write = ftrace_filter_write,
2444 .llseek = ftrace_regex_lseek,
2445 .release = ftrace_filter_release,
2446 };
2447
2448 static const struct file_operations ftrace_notrace_fops = {
2449 .open = ftrace_notrace_open,
2450 .read = seq_read,
2451 .write = ftrace_notrace_write,
2452 .llseek = ftrace_regex_lseek,
2453 .release = ftrace_notrace_release,
2454 };
2455
2456 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2457
2458 static DEFINE_MUTEX(graph_lock);
2459
2460 int ftrace_graph_count;
2461 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2462
2463 static void *
2464 g_next(struct seq_file *m, void *v, loff_t *pos)
2465 {
2466 unsigned long *array = m->private;
2467 int index = *pos;
2468
2469 (*pos)++;
2470
2471 if (index >= ftrace_graph_count)
2472 return NULL;
2473
2474 return &array[index];
2475 }
2476
2477 static void *g_start(struct seq_file *m, loff_t *pos)
2478 {
2479 void *p = NULL;
2480
2481 mutex_lock(&graph_lock);
2482
	/* Nothing to iterate; tell g_show to print that all functions are enabled */
2484 if (!ftrace_graph_count && !*pos)
2485 return (void *)1;
2486
2487 p = g_next(m, p, pos);
2488
2489 return p;
2490 }
2491
2492 static void g_stop(struct seq_file *m, void *p)
2493 {
2494 mutex_unlock(&graph_lock);
2495 }
2496
2497 static int g_show(struct seq_file *m, void *v)
2498 {
2499 unsigned long *ptr = v;
2500 char str[KSYM_SYMBOL_LEN];
2501
2502 if (!ptr)
2503 return 0;
2504
2505 if (ptr == (unsigned long *)1) {
2506 seq_printf(m, "#### all functions enabled ####\n");
2507 return 0;
2508 }
2509
2510 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
2511
2512 seq_printf(m, "%s\n", str);
2513
2514 return 0;
2515 }
2516
static const struct seq_operations ftrace_graph_seq_ops = {
2518 .start = g_start,
2519 .next = g_next,
2520 .stop = g_stop,
2521 .show = g_show,
2522 };
2523
2524 static int
2525 ftrace_graph_open(struct inode *inode, struct file *file)
2526 {
2527 int ret = 0;
2528
2529 if (unlikely(ftrace_disabled))
2530 return -ENODEV;
2531
2532 mutex_lock(&graph_lock);
2533 if ((file->f_mode & FMODE_WRITE) &&
2534 !(file->f_flags & O_APPEND)) {
2535 ftrace_graph_count = 0;
2536 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2537 }
2538
2539 if (file->f_mode & FMODE_READ) {
2540 ret = seq_open(file, &ftrace_graph_seq_ops);
2541 if (!ret) {
2542 struct seq_file *m = file->private_data;
2543 m->private = ftrace_graph_funcs;
2544 }
2545 } else
2546 file->private_data = ftrace_graph_funcs;
2547 mutex_unlock(&graph_lock);
2548
2549 return ret;
2550 }
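
/*
 * The O_APPEND test above gives the file the expected shell
 * semantics (illustration):
 *
 *	echo sys_open > set_graph_function	# replaces the list
 *	echo sys_read >> set_graph_function	# appends to it
 */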
2551
2552 static int
2553 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2554 {
2555 struct dyn_ftrace *rec;
2556 struct ftrace_page *pg;
2557 int search_len;
2558 int found = 0;
2559 int type, not;
2560 char *search;
2561 bool exists;
2562 int i;
2563
2564 if (ftrace_disabled)
2565 return -ENODEV;
2566
2567 /* decode regex */
2568 type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
2569 if (not)
2570 return -EINVAL;
2571
2572 search_len = strlen(search);
2573
2574 mutex_lock(&ftrace_lock);
2575 do_for_each_ftrace_rec(pg, rec) {
2576
2577 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2578 break;
2579
2580 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2581 continue;
2582
2583 if (ftrace_match_record(rec, search, search_len, type)) {
2584 /* ensure it is not already in the array */
2585 exists = false;
2586 for (i = 0; i < *idx; i++)
2587 if (array[i] == rec->ip) {
2588 exists = true;
2589 break;
2590 }
2591 if (!exists) {
2592 array[(*idx)++] = rec->ip;
2593 found = 1;
2594 }
2595 }
2596 } while_for_each_ftrace_rec();
2597
2598 mutex_unlock(&ftrace_lock);
2599
2600 return found ? 0 : -EINVAL;
2601 }
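
/*
 * The glob decoding above accepts the same match types as
 * set_ftrace_filter: a plain name, "prefix*", "*suffix" and
 * "*middle*".  Negation is explicitly rejected, so for example:
 *
 *	echo 'hrtimer_*' > set_graph_function	# accepted
 *	echo '!hrtimer_*' > set_graph_function	# fails with -EINVAL
 */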
2602
2603 static ssize_t
2604 ftrace_graph_write(struct file *file, const char __user *ubuf,
2605 size_t cnt, loff_t *ppos)
2606 {
2607 unsigned char buffer[FTRACE_BUFF_MAX+1];
2608 unsigned long *array;
2609 size_t read = 0;
2610 ssize_t ret;
2611 int index = 0;
2612 char ch;
2613
	/* cnt is a size_t, hence unsigned; it can never be negative */
	if (!cnt)
		return 0;
2616
2617 mutex_lock(&graph_lock);
2618
2619 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2620 ret = -EBUSY;
2621 goto out;
2622 }
2623
2624 if (file->f_mode & FMODE_READ) {
2625 struct seq_file *m = file->private_data;
2626 array = m->private;
2627 } else
2628 array = file->private_data;
2629
2630 ret = get_user(ch, ubuf++);
2631 if (ret)
2632 goto out;
2633 read++;
2634 cnt--;
2635
2636 /* skip white space */
2637 while (cnt && isspace(ch)) {
2638 ret = get_user(ch, ubuf++);
2639 if (ret)
2640 goto out;
2641 read++;
2642 cnt--;
2643 }
2644
2645 if (isspace(ch)) {
2646 *ppos += read;
2647 ret = read;
2648 goto out;
2649 }
2650
2651 while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX) {
			buffer[index++] = ch;
		} else {
			ret = -EINVAL;
			goto out;
		}
2658 ret = get_user(ch, ubuf++);
2659 if (ret)
2660 goto out;
2661 read++;
2662 cnt--;
2663 }
2664 buffer[index] = 0;
2665
2666 /* we allow only one expression at a time */
2667 ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
2668 if (ret)
2669 goto out;
2670
	*ppos += read;
2672
2673 ret = read;
2674 out:
2675 mutex_unlock(&graph_lock);
2676
2677 return ret;
2678 }
2679
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	/* read opens allocated a seq_file in ftrace_graph_open(); free it */
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
	.release = ftrace_graph_release,
};
2685 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2686
2687 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2688 {
2689 struct dentry *entry;
2690
2691 entry = debugfs_create_file("available_filter_functions", 0444,
2692 d_tracer, NULL, &ftrace_avail_fops);
2693 if (!entry)
2694 pr_warning("Could not create debugfs "
2695 "'available_filter_functions' entry\n");
2696
2697 entry = debugfs_create_file("failures", 0444,
2698 d_tracer, NULL, &ftrace_failures_fops);
2699 if (!entry)
2700 pr_warning("Could not create debugfs 'failures' entry\n");
2701
2702 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
2703 NULL, &ftrace_filter_fops);
2704 if (!entry)
2705 pr_warning("Could not create debugfs "
2706 "'set_ftrace_filter' entry\n");
2707
2708 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
2709 NULL, &ftrace_notrace_fops);
2710 if (!entry)
2711 pr_warning("Could not create debugfs "
2712 "'set_ftrace_notrace' entry\n");
2713
2714 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0644, d_tracer,
				    NULL, &ftrace_graph_fops);
2718 if (!entry)
2719 pr_warning("Could not create debugfs "
2720 "'set_graph_function' entry\n");
2721 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2722
2723 return 0;
2724 }
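
/*
 * After this runs, the dynamic ftrace control files live in the
 * tracing debugfs directory (commonly mounted at /debug or
 * /sys/kernel/debug):
 *
 *	available_filter_functions  (r)   every traceable function
 *	failures                    (r)   records that failed to convert
 *	set_ftrace_filter           (rw)  trace only these functions
 *	set_ftrace_notrace          (rw)  never trace these functions
 *	set_graph_function          (rw)  graph-trace only these functions
 */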
2725
2726 static int ftrace_convert_nops(struct module *mod,
2727 unsigned long *start,
2728 unsigned long *end)
2729 {
2730 unsigned long *p;
2731 unsigned long addr;
2732 unsigned long flags;
2733
2734 mutex_lock(&ftrace_lock);
2735 p = start;
2736 while (p < end) {
2737 addr = ftrace_call_adjust(*p++);
2738 /*
2739 * Some architecture linkers will pad between
2740 * the different mcount_loc sections of different
2741 * object files to satisfy alignments.
2742 * Skip any NULL pointers.
2743 */
2744 if (!addr)
2745 continue;
2746 ftrace_record_ip(addr);
2747 }
2748
2749 /* disable interrupts to prevent kstop machine */
2750 local_irq_save(flags);
2751 ftrace_update_code(mod);
2752 local_irq_restore(flags);
2753 mutex_unlock(&ftrace_lock);
2754
2755 return 0;
2756 }
2757
2758 void ftrace_init_module(struct module *mod,
2759 unsigned long *start, unsigned long *end)
2760 {
2761 if (ftrace_disabled || start == end)
2762 return;
2763 ftrace_convert_nops(mod, start, end);
2764 }
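
/*
 * Background on the start/end pointers used here and in ftrace_init()
 * below: at build time scripts/recordmcount.pl collects the address
 * of every mcount call site into an __mcount_loc section.  The linker
 * script provides __start_mcount_loc/__stop_mcount_loc for the kernel
 * proper, and the module loader hands in the per-module bounds, which
 * ftrace_convert_nops() walks to record and patch each call site.
 */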
2765
2766 extern unsigned long __start_mcount_loc[];
2767 extern unsigned long __stop_mcount_loc[];
2768
2769 void __init ftrace_init(void)
2770 {
2771 unsigned long count, addr, flags;
2772 int ret;
2773
2774 /* Keep the ftrace pointer to the stub */
2775 addr = (unsigned long)ftrace_stub;
2776
2777 local_irq_save(flags);
2778 ftrace_dyn_arch_init(&addr);
2779 local_irq_restore(flags);
2780
2781 /* ftrace_dyn_arch_init places the return code in addr */
2782 if (addr)
2783 goto failed;
2784
2785 count = __stop_mcount_loc - __start_mcount_loc;
2786
2787 ret = ftrace_dyn_table_alloc(count);
2788 if (ret)
2789 goto failed;
2790
2791 last_ftrace_enabled = ftrace_enabled = 1;
2792
2793 ret = ftrace_convert_nops(NULL,
2794 __start_mcount_loc,
2795 __stop_mcount_loc);
2796
2797 return;
2798 failed:
2799 ftrace_disabled = 1;
2800 }
2801
2802 #else
2803
2804 static int __init ftrace_nodyn_init(void)
2805 {
2806 ftrace_enabled = 1;
2807 return 0;
2808 }
2809 device_initcall(ftrace_nodyn_init);
2810
2811 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2812 static inline void ftrace_startup_enable(int command) { }
2813 /* Keep as macros so we do not need to define the commands */
2814 # define ftrace_startup(command) do { } while (0)
2815 # define ftrace_shutdown(command) do { } while (0)
2816 # define ftrace_startup_sysctl() do { } while (0)
2817 # define ftrace_shutdown_sysctl() do { } while (0)
2818 #endif /* CONFIG_DYNAMIC_FTRACE */
2819
2820 static ssize_t
2821 ftrace_pid_read(struct file *file, char __user *ubuf,
2822 size_t cnt, loff_t *ppos)
2823 {
2824 char buf[64];
2825 int r;
2826
2827 if (ftrace_pid_trace == ftrace_swapper_pid)
2828 r = sprintf(buf, "swapper tasks\n");
2829 else if (ftrace_pid_trace)
2830 r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
2831 else
2832 r = sprintf(buf, "no pid\n");
2833
2834 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2835 }
2836
2837 static void clear_ftrace_swapper(void)
2838 {
2839 struct task_struct *p;
2840 int cpu;
2841
2842 get_online_cpus();
2843 for_each_online_cpu(cpu) {
2844 p = idle_task(cpu);
2845 clear_tsk_trace_trace(p);
2846 }
2847 put_online_cpus();
2848 }
2849
2850 static void set_ftrace_swapper(void)
2851 {
2852 struct task_struct *p;
2853 int cpu;
2854
2855 get_online_cpus();
2856 for_each_online_cpu(cpu) {
2857 p = idle_task(cpu);
2858 set_tsk_trace_trace(p);
2859 }
2860 put_online_cpus();
2861 }
2862
2863 static void clear_ftrace_pid(struct pid *pid)
2864 {
2865 struct task_struct *p;
2866
2867 rcu_read_lock();
2868 do_each_pid_task(pid, PIDTYPE_PID, p) {
2869 clear_tsk_trace_trace(p);
2870 } while_each_pid_task(pid, PIDTYPE_PID, p);
2871 rcu_read_unlock();
2872
2873 put_pid(pid);
2874 }
2875
2876 static void set_ftrace_pid(struct pid *pid)
2877 {
2878 struct task_struct *p;
2879
2880 rcu_read_lock();
2881 do_each_pid_task(pid, PIDTYPE_PID, p) {
2882 set_tsk_trace_trace(p);
2883 } while_each_pid_task(pid, PIDTYPE_PID, p);
2884 rcu_read_unlock();
2885 }
2886
2887 static void clear_ftrace_pid_task(struct pid **pid)
2888 {
2889 if (*pid == ftrace_swapper_pid)
2890 clear_ftrace_swapper();
2891 else
2892 clear_ftrace_pid(*pid);
2893
2894 *pid = NULL;
2895 }
2896
2897 static void set_ftrace_pid_task(struct pid *pid)
2898 {
2899 if (pid == ftrace_swapper_pid)
2900 set_ftrace_swapper();
2901 else
2902 set_ftrace_pid(pid);
2903 }
2904
2905 static ssize_t
2906 ftrace_pid_write(struct file *filp, const char __user *ubuf,
2907 size_t cnt, loff_t *ppos)
2908 {
2909 struct pid *pid;
2910 char buf[64];
2911 long val;
2912 int ret;
2913
2914 if (cnt >= sizeof(buf))
2915 return -EINVAL;
2916
2917 if (copy_from_user(&buf, ubuf, cnt))
2918 return -EFAULT;
2919
2920 buf[cnt] = 0;
2921
2922 ret = strict_strtol(buf, 10, &val);
2923 if (ret < 0)
2924 return ret;
2925
2926 mutex_lock(&ftrace_lock);
2927 if (val < 0) {
2928 /* disable pid tracing */
2929 if (!ftrace_pid_trace)
2930 goto out;
2931
2932 clear_ftrace_pid_task(&ftrace_pid_trace);
2933
2934 } else {
2935 /* swapper task is special */
2936 if (!val) {
2937 pid = ftrace_swapper_pid;
2938 if (pid == ftrace_pid_trace)
2939 goto out;
2940 } else {
2941 pid = find_get_pid(val);
2942
2943 if (pid == ftrace_pid_trace) {
2944 put_pid(pid);
2945 goto out;
2946 }
2947 }
2948
2949 if (ftrace_pid_trace)
2950 clear_ftrace_pid_task(&ftrace_pid_trace);
2951
2952 if (!pid)
2953 goto out;
2954
2955 ftrace_pid_trace = pid;
2956
2957 set_ftrace_pid_task(ftrace_pid_trace);
2958 }
2959
2960 /* update the function call */
2961 ftrace_update_pid_func();
2962 ftrace_startup_enable(0);
2963
2964 out:
2965 mutex_unlock(&ftrace_lock);
2966
2967 return cnt;
2968 }
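
/*
 * The accepted values mirror the branches above (illustration):
 *
 *	echo 123 > set_ftrace_pid	# trace only pid 123
 *	echo 0   > set_ftrace_pid	# trace the per-cpu idle tasks
 *	echo -1  > set_ftrace_pid	# disable pid filtering
 */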
2969
2970 static const struct file_operations ftrace_pid_fops = {
2971 .read = ftrace_pid_read,
2972 .write = ftrace_pid_write,
2973 };
2974
2975 static __init int ftrace_init_debugfs(void)
2976 {
2977 struct dentry *d_tracer;
2978 struct dentry *entry;
2979
2980 d_tracer = tracing_init_dentry();
2981 if (!d_tracer)
2982 return 0;
2983
2984 ftrace_init_dyn_debugfs(d_tracer);
2985
2986 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
2987 NULL, &ftrace_pid_fops);
2988 if (!entry)
2989 pr_warning("Could not create debugfs "
2990 "'set_ftrace_pid' entry\n");
2991
2992 ftrace_profile_debugfs(d_tracer);
2993
2994 return 0;
2995 }
2996 fs_initcall(ftrace_init_debugfs);
2997
2998 /**
2999 * ftrace_kill - kill ftrace
3000 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. To shut ftrace down cleanly from a
 * non-atomic context, use unregister_ftrace_function() instead.
3004 */
3005 void ftrace_kill(void)
3006 {
3007 ftrace_disabled = 1;
3008 ftrace_enabled = 0;
3009 clear_ftrace_function();
3010 }
3011
3012 /**
3013 * register_ftrace_function - register a function for profiling
3014 * @ops - ops structure that holds the function for profiling.
3015 *
3016 * Register a function to be called by all functions in the
3017 * kernel.
3018 *
3019 * Note: @ops->func and all the functions it calls must be labeled
3020 * with "notrace", otherwise it will go into a
3021 * recursive loop.
3022 */
3023 int register_ftrace_function(struct ftrace_ops *ops)
3024 {
3025 int ret;
3026
3027 if (unlikely(ftrace_disabled))
3028 return -1;
3029
3030 mutex_lock(&ftrace_lock);
3031
3032 ret = __register_ftrace_function(ops);
3033 ftrace_startup(0);
3034
3035 mutex_unlock(&ftrace_lock);
3036 return ret;
3037 }
3038
3039 /**
3040 * unregister_ftrace_function - unregister a function for profiling.
3041 * @ops - ops structure that holds the function to unregister
3042 *
3043 * Unregister a function that was added to be called by ftrace profiling.
3044 */
3045 int unregister_ftrace_function(struct ftrace_ops *ops)
3046 {
3047 int ret;
3048
3049 mutex_lock(&ftrace_lock);
3050 ret = __unregister_ftrace_function(ops);
3051 ftrace_shutdown(0);
3052 mutex_unlock(&ftrace_lock);
3053
3054 return ret;
3055 }
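
#if 0
/*
 * Illustrative sketch only (never compiled): a minimal user of the
 * registration API above.  my_callback/my_ops are invented names.
 * The callback, and everything it calls, must be notrace to avoid
 * recursing into the tracer.
 */
static void notrace my_callback(unsigned long ip, unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip its call site */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func = my_callback,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}
#endif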
3056
3057 int
3058 ftrace_enable_sysctl(struct ctl_table *table, int write,
3059 struct file *file, void __user *buffer, size_t *lenp,
3060 loff_t *ppos)
3061 {
3062 int ret;
3063
3064 if (unlikely(ftrace_disabled))
3065 return -ENODEV;
3066
3067 mutex_lock(&ftrace_lock);
3068
3069 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
3070
3071 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
3072 goto out;
3073
3074 last_ftrace_enabled = ftrace_enabled;
3075
3076 if (ftrace_enabled) {
3077
3078 ftrace_startup_sysctl();
3079
3080 /* we are starting ftrace again */
3081 if (ftrace_list != &ftrace_list_end) {
3082 if (ftrace_list->next == &ftrace_list_end)
3083 ftrace_trace_function = ftrace_list->func;
3084 else
3085 ftrace_trace_function = ftrace_list_func;
3086 }
3087
3088 } else {
3089 /* stopping ftrace calls (just send to ftrace_stub) */
3090 ftrace_trace_function = ftrace_stub;
3091
3092 ftrace_shutdown_sysctl();
3093 }
3094
3095 out:
3096 mutex_unlock(&ftrace_lock);
3097 return ret;
3098 }
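
/*
 * This handler backs the kernel.ftrace_enabled sysctl, so function
 * tracing can be flipped at run time (illustration):
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled	# stub out the callbacks
 *	echo 1 > /proc/sys/kernel/ftrace_enabled	# restore them
 */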
3099
3100 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3101
3102 static atomic_t ftrace_graph_active;
3103 static struct notifier_block ftrace_suspend_notifier;
3104
3105 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3106 {
3107 return 0;
3108 }
3109
3110 /* The callbacks that hook a function */
3111 trace_func_graph_ret_t ftrace_graph_return =
3112 (trace_func_graph_ret_t)ftrace_stub;
3113 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3114
/*
 * Try to allocate a return stack for each of up to
 * FTRACE_RETSTACK_ALLOC_SIZE tasks that do not yet have one.
 */
3116 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3117 {
3118 int i;
3119 int ret = 0;
3120 unsigned long flags;
3121 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3122 struct task_struct *g, *t;
3123
3124 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3125 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3126 * sizeof(struct ftrace_ret_stack),
3127 GFP_KERNEL);
3128 if (!ret_stack_list[i]) {
3129 start = 0;
3130 end = i;
3131 ret = -ENOMEM;
3132 goto free;
3133 }
3134 }
3135
3136 read_lock_irqsave(&tasklist_lock, flags);
3137 do_each_thread(g, t) {
3138 if (start == end) {
3139 ret = -EAGAIN;
3140 goto unlock;
3141 }
3142
3143 if (t->ret_stack == NULL) {
3144 t->curr_ret_stack = -1;
3145 /* Make sure IRQs see the -1 first: */
3146 barrier();
3147 t->ret_stack = ret_stack_list[start++];
3148 atomic_set(&t->tracing_graph_pause, 0);
3149 atomic_set(&t->trace_overrun, 0);
3150 }
3151 } while_each_thread(g, t);
3152
3153 unlock:
3154 read_unlock_irqrestore(&tasklist_lock, flags);
3155 free:
3156 for (i = start; i < end; i++)
3157 kfree(ret_stack_list[i]);
3158 return ret;
3159 }
3160
3161 static void
3162 ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
3163 struct task_struct *next)
3164 {
3165 unsigned long long timestamp;
3166 int index;
3167
3168 /*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
3171 */
3172 if (trace_flags & TRACE_ITER_SLEEP_TIME)
3173 return;
3174
3175 timestamp = trace_clock_local();
3176
3177 prev->ftrace_timestamp = timestamp;
3178
3179 /* only process tasks that we timestamped */
3180 if (!next->ftrace_timestamp)
3181 return;
3182
3183 /*
3184 * Update all the counters in next to make up for the
3185 * time next was sleeping.
3186 */
3187 timestamp -= next->ftrace_timestamp;
3188
3189 for (index = next->curr_ret_stack; index >= 0; index--)
3190 next->ret_stack[index].calltime += timestamp;
3191 }
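
/*
 * Worked example of the adjustment above: if "next" was scheduled out
 * at t=100us with two graph frames pending and runs again at t=130us,
 * each pending calltime is moved forward by 30us, so the durations
 * eventually reported for those functions exclude the time asleep.
 */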
3192
3193 /* Allocate a return stack for each task */
3194 static int start_graph_tracing(void)
3195 {
3196 struct ftrace_ret_stack **ret_stack_list;
3197 int ret, cpu;
3198
3199 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3200 sizeof(struct ftrace_ret_stack *),
3201 GFP_KERNEL);
3202
3203 if (!ret_stack_list)
3204 return -ENOMEM;
3205
	/* The ret_stack of each CPU's boot idle task will never be freed */
3207 for_each_online_cpu(cpu)
3208 ftrace_graph_init_task(idle_task(cpu));
3209
3210 do {
3211 ret = alloc_retstack_tasklist(ret_stack_list);
3212 } while (ret == -EAGAIN);
3213
3214 if (!ret) {
3215 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
3216 if (ret)
3217 pr_info("ftrace_graph: Couldn't activate tracepoint"
3218 " probe to kernel_sched_switch\n");
3219 }
3220
3221 kfree(ret_stack_list);
3222 return ret;
3223 }
3224
3225 /*
3226 * Hibernation protection.
 * The state of the current task is too unstable during
3228 * suspend/restore to disk. We want to protect against that.
3229 */
3230 static int
3231 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3232 void *unused)
3233 {
3234 switch (state) {
3235 case PM_HIBERNATION_PREPARE:
3236 pause_graph_tracing();
3237 break;
3238
3239 case PM_POST_HIBERNATION:
3240 unpause_graph_tracing();
3241 break;
3242 }
3243 return NOTIFY_DONE;
3244 }
3245
3246 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3247 trace_func_graph_ent_t entryfunc)
3248 {
3249 int ret = 0;
3250
3251 mutex_lock(&ftrace_lock);
3252
3253 /* we currently allow only one tracer registered at a time */
3254 if (atomic_read(&ftrace_graph_active)) {
3255 ret = -EBUSY;
3256 goto out;
3257 }
3258
3259 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3260 register_pm_notifier(&ftrace_suspend_notifier);
3261
3262 atomic_inc(&ftrace_graph_active);
3263 ret = start_graph_tracing();
3264 if (ret) {
3265 atomic_dec(&ftrace_graph_active);
3266 goto out;
3267 }
3268
3269 ftrace_graph_return = retfunc;
3270 ftrace_graph_entry = entryfunc;
3271
3272 ftrace_startup(FTRACE_START_FUNC_RET);
3273
3274 out:
3275 mutex_unlock(&ftrace_lock);
3276 return ret;
3277 }
3278
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	/* guard against unbalanced calls: nothing to undo if not active */
	if (unlikely(!atomic_read(&ftrace_graph_active)))
		goto out;

	atomic_dec(&ftrace_graph_active);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

 out:
	mutex_unlock(&ftrace_lock);
}
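
#if 0
/*
 * Illustrative sketch only (never compiled): hooking function entry
 * and exit with the graph tracer.  my_entry/my_return are invented
 * names; only one such pair may be registered at a time.
 */
static int my_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* nonzero: also hook this function's return */
}

static void my_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the (adjusted) duration */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_return, my_entry);
}
#endif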
3292
3293 /* Allocate a return stack for newly created task */
3294 void ftrace_graph_init_task(struct task_struct *t)
3295 {
3296 if (atomic_read(&ftrace_graph_active)) {
3297 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3298 * sizeof(struct ftrace_ret_stack),
3299 GFP_KERNEL);
3300 if (!t->ret_stack)
3301 return;
3302 t->curr_ret_stack = -1;
3303 atomic_set(&t->tracing_graph_pause, 0);
3304 atomic_set(&t->trace_overrun, 0);
3305 t->ftrace_timestamp = 0;
3306 } else
3307 t->ret_stack = NULL;
3308 }
3309
3310 void ftrace_graph_exit_task(struct task_struct *t)
3311 {
3312 struct ftrace_ret_stack *ret_stack = t->ret_stack;
3313
3314 t->ret_stack = NULL;
3315 /* NULL must become visible to IRQs before we free it: */
3316 barrier();
3317
3318 kfree(ret_stack);
3319 }
3320
3321 void ftrace_graph_stop(void)
3322 {
3323 ftrace_stop();
3324 }
3325 #endif
3326