seq_file: constify seq_operations
deliverable/linux.git: kernel/trace/ftrace.c
1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/kprobes.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31
32 #include <trace/events/sched.h>
33
34 #include <asm/ftrace.h>
35 #include <asm/setup.h>
36
37 #include "trace_output.h"
38 #include "trace_stat.h"
39
40 #define FTRACE_WARN_ON(cond) \
41 do { \
42 if (WARN_ON(cond)) \
43 ftrace_kill(); \
44 } while (0)
45
46 #define FTRACE_WARN_ON_ONCE(cond) \
47 do { \
48 if (WARN_ON_ONCE(cond)) \
49 ftrace_kill(); \
50 } while (0)
51
52 /* hash bits for specific function selection */
53 #define FTRACE_HASH_BITS 7
54 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
55
56 /* ftrace_enabled is a method to turn ftrace on or off */
57 int ftrace_enabled __read_mostly;
58 static int last_ftrace_enabled;
59
60 /* Quick disabling of function tracer. */
61 int function_trace_stop;
62
63 /*
64 * ftrace_disabled is set when an anomaly is discovered.
65 * ftrace_disabled is much stronger than ftrace_enabled.
66 */
67 static int ftrace_disabled __read_mostly;
68
69 static DEFINE_MUTEX(ftrace_lock);
70
71 static struct ftrace_ops ftrace_list_end __read_mostly =
72 {
73 .func = ftrace_stub,
74 };
75
76 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
77 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
78 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
79 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
80
81 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
82 {
83 struct ftrace_ops *op = ftrace_list;
84
85 /* in case someone actually ports this to alpha! */
86 read_barrier_depends();
87
88 while (op != &ftrace_list_end) {
89 /* silly alpha */
90 read_barrier_depends();
91 op->func(ip, parent_ip);
92 op = op->next;
93 }
94 }
95
96 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
97 {
98 if (!test_tsk_trace_trace(current))
99 return;
100
101 ftrace_pid_function(ip, parent_ip);
102 }
103
104 static void set_ftrace_pid_function(ftrace_func_t func)
105 {
106 /* do not set ftrace_pid_function to itself! */
107 if (func != ftrace_pid_func)
108 ftrace_pid_function = func;
109 }
110
111 /**
112 * clear_ftrace_function - reset the ftrace function
113 *
114 * This NULLs the ftrace function and in essence stops
115 tracing. There may be a lag before all call sites stop calling the tracer.
116 */
117 void clear_ftrace_function(void)
118 {
119 ftrace_trace_function = ftrace_stub;
120 __ftrace_trace_function = ftrace_stub;
121 ftrace_pid_function = ftrace_stub;
122 }
123
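/*
 * Illustrative sketch, not part of the original file: a tracer normally
 * hooks in through register_ftrace_function()/unregister_ftrace_function()
 * (provided elsewhere in this file), which drive the list handling below.
 * The names my_trace_func/my_ops are hypothetical.
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		... called for every traced function, must be fast ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * clear_ftrace_function() above simply points every trace hook back at
 * ftrace_stub.
 */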
124 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
125 /*
126 * For those archs that do not test function_trace_stop in their
127 * mcount call site, we need to do it from C.
128 */
129 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
130 {
131 if (function_trace_stop)
132 return;
133
134 __ftrace_trace_function(ip, parent_ip);
135 }
136 #endif
137
138 static int __register_ftrace_function(struct ftrace_ops *ops)
139 {
140 ops->next = ftrace_list;
141 /*
142 * We are entering ops into the ftrace_list but another
143 * CPU might be walking that list. We need to make sure
144 * the ops->next pointer is valid before another CPU sees
145 * the ops pointer included into the ftrace_list.
146 */
147 smp_wmb();
148 ftrace_list = ops;
149
150 if (ftrace_enabled) {
151 ftrace_func_t func;
152
153 if (ops->next == &ftrace_list_end)
154 func = ops->func;
155 else
156 func = ftrace_list_func;
157
158 if (ftrace_pid_trace) {
159 set_ftrace_pid_function(func);
160 func = ftrace_pid_func;
161 }
162
163 /*
164 * For one func, simply call it directly.
165 * For more than one func, call the chain.
166 */
167 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
168 ftrace_trace_function = func;
169 #else
170 __ftrace_trace_function = func;
171 ftrace_trace_function = ftrace_test_stop_func;
172 #endif
173 }
174
175 return 0;
176 }
177
178 static int __unregister_ftrace_function(struct ftrace_ops *ops)
179 {
180 struct ftrace_ops **p;
181
182 /*
183 * If we are removing the last function, then simply point
184 * to the ftrace_stub.
185 */
186 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
187 ftrace_trace_function = ftrace_stub;
188 ftrace_list = &ftrace_list_end;
189 return 0;
190 }
191
192 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
193 if (*p == ops)
194 break;
195
196 if (*p != ops)
197 return -1;
198
199 *p = (*p)->next;
200
201 if (ftrace_enabled) {
202 /* If we only have one func left, then call that directly */
203 if (ftrace_list->next == &ftrace_list_end) {
204 ftrace_func_t func = ftrace_list->func;
205
206 if (ftrace_pid_trace) {
207 set_ftrace_pid_function(func);
208 func = ftrace_pid_func;
209 }
210 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
211 ftrace_trace_function = func;
212 #else
213 __ftrace_trace_function = func;
214 #endif
215 }
216 }
217
218 return 0;
219 }
220
221 static void ftrace_update_pid_func(void)
222 {
223 ftrace_func_t func;
224
225 if (ftrace_trace_function == ftrace_stub)
226 return;
227
228 func = ftrace_trace_function;
229
230 if (ftrace_pid_trace) {
231 set_ftrace_pid_function(func);
232 func = ftrace_pid_func;
233 } else {
234 if (func == ftrace_pid_func)
235 func = ftrace_pid_function;
236 }
237
238 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
239 ftrace_trace_function = func;
240 #else
241 __ftrace_trace_function = func;
242 #endif
243 }
244
245 #ifdef CONFIG_FUNCTION_PROFILER
246 struct ftrace_profile {
247 struct hlist_node node;
248 unsigned long ip;
249 unsigned long counter;
250 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
251 unsigned long long time;
252 #endif
253 };
254
255 struct ftrace_profile_page {
256 struct ftrace_profile_page *next;
257 unsigned long index;
258 struct ftrace_profile records[];
259 };
260
261 struct ftrace_profile_stat {
262 atomic_t disabled;
263 struct hlist_head *hash;
264 struct ftrace_profile_page *pages;
265 struct ftrace_profile_page *start;
266 struct tracer_stat stat;
267 };
268
269 #define PROFILE_RECORDS_SIZE \
270 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
271
272 #define PROFILES_PER_PAGE \
273 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
274
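/*
 * Rough sizing (assuming a 64-bit kernel with 4K pages and the graph
 * tracer enabled): struct ftrace_profile is about 40 bytes (hlist_node +
 * ip + counter + time) and the page header is 16 bytes, so
 * PROFILES_PER_PAGE works out to roughly (4096 - 16) / 40, i.e. about
 * 100 records per page; the 20000-function estimate used further down
 * then needs on the order of 200 pages.
 */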
275 static int ftrace_profile_bits __read_mostly;
276 static int ftrace_profile_enabled __read_mostly;
277
278 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
279 static DEFINE_MUTEX(ftrace_profile_lock);
280
281 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
282
283 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
284
285 static void *
286 function_stat_next(void *v, int idx)
287 {
288 struct ftrace_profile *rec = v;
289 struct ftrace_profile_page *pg;
290
291 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
292
293 again:
294 if (idx != 0)
295 rec++;
296
297 if ((void *)rec >= (void *)&pg->records[pg->index]) {
298 pg = pg->next;
299 if (!pg)
300 return NULL;
301 rec = &pg->records[0];
302 if (!rec->counter)
303 goto again;
304 }
305
306 return rec;
307 }
308
309 static void *function_stat_start(struct tracer_stat *trace)
310 {
311 struct ftrace_profile_stat *stat =
312 container_of(trace, struct ftrace_profile_stat, stat);
313
314 if (!stat || !stat->start)
315 return NULL;
316
317 return function_stat_next(&stat->start->records[0], 0);
318 }
319
320 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
321 /* function graph compares on total time */
322 static int function_stat_cmp(void *p1, void *p2)
323 {
324 struct ftrace_profile *a = p1;
325 struct ftrace_profile *b = p2;
326
327 if (a->time < b->time)
328 return -1;
329 if (a->time > b->time)
330 return 1;
331 else
332 return 0;
333 }
334 #else
335 /* not function graph compares against hits */
336 static int function_stat_cmp(void *p1, void *p2)
337 {
338 struct ftrace_profile *a = p1;
339 struct ftrace_profile *b = p2;
340
341 if (a->counter < b->counter)
342 return -1;
343 if (a->counter > b->counter)
344 return 1;
345 else
346 return 0;
347 }
348 #endif
349
350 static int function_stat_headers(struct seq_file *m)
351 {
352 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
353 seq_printf(m, "  Function                               "
354 "Hit    Time            Avg\n"
355 "  --------                               "
356 "---    ----            ---\n");
357 #else
358 seq_printf(m, "  Function                               Hit\n"
359 "  --------                               ---\n");
360 #endif
361 return 0;
362 }
363
364 static int function_stat_show(struct seq_file *m, void *v)
365 {
366 struct ftrace_profile *rec = v;
367 char str[KSYM_SYMBOL_LEN];
368 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
369 static DEFINE_MUTEX(mutex);
370 static struct trace_seq s;
371 unsigned long long avg;
372 #endif
373
374 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
375 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
376
377 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
378 seq_printf(m, " ");
379 avg = rec->time;
380 do_div(avg, rec->counter);
381
382 mutex_lock(&mutex);
383 trace_seq_init(&s);
384 trace_print_graph_duration(rec->time, &s);
385 trace_seq_puts(&s, " ");
386 trace_print_graph_duration(avg, &s);
387 trace_print_seq(m, &s);
388 mutex_unlock(&mutex);
389 #endif
390 seq_putc(m, '\n');
391
392 return 0;
393 }
394
395 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
396 {
397 struct ftrace_profile_page *pg;
398
399 pg = stat->pages = stat->start;
400
401 while (pg) {
402 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
403 pg->index = 0;
404 pg = pg->next;
405 }
406
407 memset(stat->hash, 0,
408 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
409 }
410
411 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
412 {
413 struct ftrace_profile_page *pg;
414 int functions;
415 int pages;
416 int i;
417
418 /* If we already allocated, do nothing */
419 if (stat->pages)
420 return 0;
421
422 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
423 if (!stat->pages)
424 return -ENOMEM;
425
426 #ifdef CONFIG_DYNAMIC_FTRACE
427 functions = ftrace_update_tot_cnt;
428 #else
429 /*
430 * We do not know the number of functions that exist because
431 * dynamic tracing is what counts them. From past experience,
432 * there are around 20K functions. That should be more than enough.
433 * It is highly unlikely we will execute every function in
434 * the kernel.
435 */
436 functions = 20000;
437 #endif
438
439 pg = stat->start = stat->pages;
440
441 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
442
443 for (i = 0; i < pages; i++) {
444 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
445 if (!pg->next)
446 goto out_free;
447 pg = pg->next;
448 }
449
450 return 0;
451
452 out_free:
453 pg = stat->start;
454 while (pg) {
455 unsigned long tmp = (unsigned long)pg;
456
457 pg = pg->next;
458 free_page(tmp);
459 }
460
461 free_page((unsigned long)stat->pages);
462 stat->pages = NULL;
463 stat->start = NULL;
464
465 return -ENOMEM;
466 }
467
468 static int ftrace_profile_init_cpu(int cpu)
469 {
470 struct ftrace_profile_stat *stat;
471 int size;
472
473 stat = &per_cpu(ftrace_profile_stats, cpu);
474
475 if (stat->hash) {
476 /* If the profile is already created, simply reset it */
477 ftrace_profile_reset(stat);
478 return 0;
479 }
480
481 /*
482 * We are profiling all functions, but usually only a few thousand
483 * functions are hit. We'll make a hash of 1024 items.
484 */
485 size = FTRACE_PROFILE_HASH_SIZE;
486
487 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
488
489 if (!stat->hash)
490 return -ENOMEM;
491
492 if (!ftrace_profile_bits) {
493 size--;
494
495 for (; size; size >>= 1)
496 ftrace_profile_bits++;
497 }
498
499 /* Preallocate the function profiling pages */
500 if (ftrace_profile_pages_init(stat) < 0) {
501 kfree(stat->hash);
502 stat->hash = NULL;
503 return -ENOMEM;
504 }
505
506 return 0;
507 }
508
509 static int ftrace_profile_init(void)
510 {
511 int cpu;
512 int ret = 0;
513
514 for_each_online_cpu(cpu) {
515 ret = ftrace_profile_init_cpu(cpu);
516 if (ret)
517 break;
518 }
519
520 return ret;
521 }
522
523 /* interrupts must be disabled */
524 static struct ftrace_profile *
525 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
526 {
527 struct ftrace_profile *rec;
528 struct hlist_head *hhd;
529 struct hlist_node *n;
530 unsigned long key;
531
532 key = hash_long(ip, ftrace_profile_bits);
533 hhd = &stat->hash[key];
534
535 if (hlist_empty(hhd))
536 return NULL;
537
538 hlist_for_each_entry_rcu(rec, n, hhd, node) {
539 if (rec->ip == ip)
540 return rec;
541 }
542
543 return NULL;
544 }
545
546 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
547 struct ftrace_profile *rec)
548 {
549 unsigned long key;
550
551 key = hash_long(rec->ip, ftrace_profile_bits);
552 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
553 }
554
555 /*
556 * The memory is already allocated; this simply finds a new record to use.
557 */
558 static struct ftrace_profile *
559 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
560 {
561 struct ftrace_profile *rec = NULL;
562
563 /* prevent recursion (from NMIs) */
564 if (atomic_inc_return(&stat->disabled) != 1)
565 goto out;
566
567 /*
568 * Try to find the function again since an NMI
569 * could have added it
570 */
571 rec = ftrace_find_profiled_func(stat, ip);
572 if (rec)
573 goto out;
574
575 if (stat->pages->index == PROFILES_PER_PAGE) {
576 if (!stat->pages->next)
577 goto out;
578 stat->pages = stat->pages->next;
579 }
580
581 rec = &stat->pages->records[stat->pages->index++];
582 rec->ip = ip;
583 ftrace_add_profile(stat, rec);
584
585 out:
586 atomic_dec(&stat->disabled);
587
588 return rec;
589 }
590
591 static void
592 function_profile_call(unsigned long ip, unsigned long parent_ip)
593 {
594 struct ftrace_profile_stat *stat;
595 struct ftrace_profile *rec;
596 unsigned long flags;
597
598 if (!ftrace_profile_enabled)
599 return;
600
601 local_irq_save(flags);
602
603 stat = &__get_cpu_var(ftrace_profile_stats);
604 if (!stat->hash || !ftrace_profile_enabled)
605 goto out;
606
607 rec = ftrace_find_profiled_func(stat, ip);
608 if (!rec) {
609 rec = ftrace_profile_alloc(stat, ip);
610 if (!rec)
611 goto out;
612 }
613
614 rec->counter++;
615 out:
616 local_irq_restore(flags);
617 }
618
619 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
620 static int profile_graph_entry(struct ftrace_graph_ent *trace)
621 {
622 function_profile_call(trace->func, 0);
623 return 1;
624 }
625
626 static void profile_graph_return(struct ftrace_graph_ret *trace)
627 {
628 struct ftrace_profile_stat *stat;
629 unsigned long long calltime;
630 struct ftrace_profile *rec;
631 unsigned long flags;
632
633 local_irq_save(flags);
634 stat = &__get_cpu_var(ftrace_profile_stats);
635 if (!stat->hash || !ftrace_profile_enabled)
636 goto out;
637
638 calltime = trace->rettime - trace->calltime;
639
640 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
641 int index;
642
643 index = trace->depth;
644
645 /* Append this call time to the parent time to subtract */
646 if (index)
647 current->ret_stack[index - 1].subtime += calltime;
648
649 if (current->ret_stack[index].subtime < calltime)
650 calltime -= current->ret_stack[index].subtime;
651 else
652 calltime = 0;
653 }
654
655 rec = ftrace_find_profiled_func(stat, trace->func);
656 if (rec)
657 rec->time += calltime;
658
659 out:
660 local_irq_restore(flags);
661 }
662
663 static int register_ftrace_profiler(void)
664 {
665 return register_ftrace_graph(&profile_graph_return,
666 &profile_graph_entry);
667 }
668
669 static void unregister_ftrace_profiler(void)
670 {
671 unregister_ftrace_graph();
672 }
673 #else
674 static struct ftrace_ops ftrace_profile_ops __read_mostly =
675 {
676 .func = function_profile_call,
677 };
678
679 static int register_ftrace_profiler(void)
680 {
681 return register_ftrace_function(&ftrace_profile_ops);
682 }
683
684 static void unregister_ftrace_profiler(void)
685 {
686 unregister_ftrace_function(&ftrace_profile_ops);
687 }
688 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
689
690 static ssize_t
691 ftrace_profile_write(struct file *filp, const char __user *ubuf,
692 size_t cnt, loff_t *ppos)
693 {
694 unsigned long val;
695 char buf[64]; /* big enough to hold a number */
696 int ret;
697
698 if (cnt >= sizeof(buf))
699 return -EINVAL;
700
701 if (copy_from_user(&buf, ubuf, cnt))
702 return -EFAULT;
703
704 buf[cnt] = 0;
705
706 ret = strict_strtoul(buf, 10, &val);
707 if (ret < 0)
708 return ret;
709
710 val = !!val;
711
712 mutex_lock(&ftrace_profile_lock);
713 if (ftrace_profile_enabled ^ val) {
714 if (val) {
715 ret = ftrace_profile_init();
716 if (ret < 0) {
717 cnt = ret;
718 goto out;
719 }
720
721 ret = register_ftrace_profiler();
722 if (ret < 0) {
723 cnt = ret;
724 goto out;
725 }
726 ftrace_profile_enabled = 1;
727 } else {
728 ftrace_profile_enabled = 0;
729 /*
730 * unregister_ftrace_profiler calls stop_machine
731 * so this acts like a synchronize_sched().
732 */
733 unregister_ftrace_profiler();
734 }
735 }
736 out:
737 mutex_unlock(&ftrace_profile_lock);
738
739 filp->f_pos += cnt;
740
741 return cnt;
742 }
743
744 static ssize_t
745 ftrace_profile_read(struct file *filp, char __user *ubuf,
746 size_t cnt, loff_t *ppos)
747 {
748 char buf[64]; /* big enough to hold a number */
749 int r;
750
751 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
752 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
753 }
754
755 static const struct file_operations ftrace_profile_fops = {
756 .open = tracing_open_generic,
757 .read = ftrace_profile_read,
758 .write = ftrace_profile_write,
759 };
760
761 /* used to initialize the real stat files */
762 static struct tracer_stat function_stats __initdata = {
763 .name = "functions",
764 .stat_start = function_stat_start,
765 .stat_next = function_stat_next,
766 .stat_cmp = function_stat_cmp,
767 .stat_headers = function_stat_headers,
768 .stat_show = function_stat_show
769 };
770
771 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
772 {
773 struct ftrace_profile_stat *stat;
774 struct dentry *entry;
775 char *name;
776 int ret;
777 int cpu;
778
779 for_each_possible_cpu(cpu) {
780 stat = &per_cpu(ftrace_profile_stats, cpu);
781
782 /* allocate enough for function name + cpu number */
783 name = kmalloc(32, GFP_KERNEL);
784 if (!name) {
785 /*
786 * The files created are permanent; even if something goes wrong
787 * here we still do not free the memory already handed out.
788 */
789 WARN(1,
790 "Could not allocate stat file for cpu %d\n",
791 cpu);
792 return;
793 }
794 stat->stat = function_stats;
795 snprintf(name, 32, "function%d", cpu);
796 stat->stat.name = name;
797 ret = register_stat_tracer(&stat->stat);
798 if (ret) {
799 WARN(1,
800 "Could not register function stat for cpu %d\n",
801 cpu);
802 kfree(name);
803 return;
804 }
805 }
806
807 entry = debugfs_create_file("function_profile_enabled", 0644,
808 d_tracer, NULL, &ftrace_profile_fops);
809 if (!entry)
810 pr_warning("Could not create debugfs "
811 "'function_profile_enabled' entry\n");
812 }
813
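/*
 * Typical usage from user space (paths assume debugfs mounted at the
 * conventional /sys/kernel/debug location):
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	... run the workload ...
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * One "functionN" stat file is registered per possible CPU by the loop
 * above; the trace_stat directory is provided by the stat tracer code.
 */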
814 #else /* CONFIG_FUNCTION_PROFILER */
815 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
816 {
817 }
818 #endif /* CONFIG_FUNCTION_PROFILER */
819
820 /* set when tracing only a pid */
821 struct pid *ftrace_pid_trace;
822 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
823
824 #ifdef CONFIG_DYNAMIC_FTRACE
825
826 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
827 # error Dynamic ftrace depends on MCOUNT_RECORD
828 #endif
829
830 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
831
832 struct ftrace_func_probe {
833 struct hlist_node node;
834 struct ftrace_probe_ops *ops;
835 unsigned long flags;
836 unsigned long ip;
837 void *data;
838 struct rcu_head rcu;
839 };
840
841 enum {
842 FTRACE_ENABLE_CALLS = (1 << 0),
843 FTRACE_DISABLE_CALLS = (1 << 1),
844 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
845 FTRACE_ENABLE_MCOUNT = (1 << 3),
846 FTRACE_DISABLE_MCOUNT = (1 << 4),
847 FTRACE_START_FUNC_RET = (1 << 5),
848 FTRACE_STOP_FUNC_RET = (1 << 6),
849 };
850
851 static int ftrace_filtered;
852
853 static struct dyn_ftrace *ftrace_new_addrs;
854
855 static DEFINE_MUTEX(ftrace_regex_lock);
856
857 struct ftrace_page {
858 struct ftrace_page *next;
859 int index;
860 struct dyn_ftrace records[];
861 };
862
863 #define ENTRIES_PER_PAGE \
864 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
865
866 /* estimate from running different kernels */
867 #define NR_TO_INIT 10000
868
869 static struct ftrace_page *ftrace_pages_start;
870 static struct ftrace_page *ftrace_pages;
871
872 static struct dyn_ftrace *ftrace_free_records;
873
874 /*
875 * This is a double for loop. Do not use 'break' to break out of the
876 * loop; you must use a goto.
877 */
878 #define do_for_each_ftrace_rec(pg, rec) \
879 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
880 int _____i; \
881 for (_____i = 0; _____i < pg->index; _____i++) { \
882 rec = &pg->records[_____i];
883
884 #define while_for_each_ftrace_rec() \
885 } \
886 }
887
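/*
 * Usage pattern for the macro pair above, mirroring the real users later
 * in this file ('some_stop_condition' is a placeholder); 'break' would
 * only leave the inner loop, hence the goto rule:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;
 *		if (some_stop_condition)
 *			goto out;
 *	} while_for_each_ftrace_rec();
 * out:
 */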
888 #ifdef CONFIG_KPROBES
889
890 static int frozen_record_count;
891
892 static inline void freeze_record(struct dyn_ftrace *rec)
893 {
894 if (!(rec->flags & FTRACE_FL_FROZEN)) {
895 rec->flags |= FTRACE_FL_FROZEN;
896 frozen_record_count++;
897 }
898 }
899
900 static inline void unfreeze_record(struct dyn_ftrace *rec)
901 {
902 if (rec->flags & FTRACE_FL_FROZEN) {
903 rec->flags &= ~FTRACE_FL_FROZEN;
904 frozen_record_count--;
905 }
906 }
907
908 static inline int record_frozen(struct dyn_ftrace *rec)
909 {
910 return rec->flags & FTRACE_FL_FROZEN;
911 }
912 #else
913 # define freeze_record(rec) ({ 0; })
914 # define unfreeze_record(rec) ({ 0; })
915 # define record_frozen(rec) ({ 0; })
916 #endif /* CONFIG_KPROBES */
917
918 static void ftrace_free_rec(struct dyn_ftrace *rec)
919 {
920 rec->freelist = ftrace_free_records;
921 ftrace_free_records = rec;
922 rec->flags |= FTRACE_FL_FREE;
923 }
924
925 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
926 {
927 struct dyn_ftrace *rec;
928
929 /* First check for freed records */
930 if (ftrace_free_records) {
931 rec = ftrace_free_records;
932
933 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
934 FTRACE_WARN_ON_ONCE(1);
935 ftrace_free_records = NULL;
936 return NULL;
937 }
938
939 ftrace_free_records = rec->freelist;
940 memset(rec, 0, sizeof(*rec));
941 return rec;
942 }
943
944 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
945 if (!ftrace_pages->next) {
946 /* allocate another page */
947 ftrace_pages->next =
948 (void *)get_zeroed_page(GFP_KERNEL);
949 if (!ftrace_pages->next)
950 return NULL;
951 }
952 ftrace_pages = ftrace_pages->next;
953 }
954
955 return &ftrace_pages->records[ftrace_pages->index++];
956 }
957
958 static struct dyn_ftrace *
959 ftrace_record_ip(unsigned long ip)
960 {
961 struct dyn_ftrace *rec;
962
963 if (ftrace_disabled)
964 return NULL;
965
966 rec = ftrace_alloc_dyn_node(ip);
967 if (!rec)
968 return NULL;
969
970 rec->ip = ip;
971 rec->newlist = ftrace_new_addrs;
972 ftrace_new_addrs = rec;
973
974 return rec;
975 }
976
977 static void print_ip_ins(const char *fmt, unsigned char *p)
978 {
979 int i;
980
981 printk(KERN_CONT "%s", fmt);
982
983 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
984 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
985 }
986
987 static void ftrace_bug(int failed, unsigned long ip)
988 {
989 switch (failed) {
990 case -EFAULT:
991 FTRACE_WARN_ON_ONCE(1);
992 pr_info("ftrace faulted on modifying ");
993 print_ip_sym(ip);
994 break;
995 case -EINVAL:
996 FTRACE_WARN_ON_ONCE(1);
997 pr_info("ftrace failed to modify ");
998 print_ip_sym(ip);
999 print_ip_ins(" actual: ", (unsigned char *)ip);
1000 printk(KERN_CONT "\n");
1001 break;
1002 case -EPERM:
1003 FTRACE_WARN_ON_ONCE(1);
1004 pr_info("ftrace faulted on writing ");
1005 print_ip_sym(ip);
1006 break;
1007 default:
1008 FTRACE_WARN_ON_ONCE(1);
1009 pr_info("ftrace faulted on unknown error ");
1010 print_ip_sym(ip);
1011 }
1012 }
1013
1014
1015 static int
1016 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1017 {
1018 unsigned long ftrace_addr;
1019 unsigned long flag = 0UL;
1020
1021 ftrace_addr = (unsigned long)FTRACE_ADDR;
1022
1023 /*
1024 * If this record is not to be traced or we want to disable it,
1025 * then disable it.
1026 *
1027 * If we want to enable it and filtering is off, then enable it.
1028 *
1029 * If we want to enable it and filtering is on, enable it only if
1030 * it's filtered
1031 */
1032 if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
1033 if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
1034 flag = FTRACE_FL_ENABLED;
1035 }
1036
1037 /* If the state of this record hasn't changed, then do nothing */
1038 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1039 return 0;
1040
1041 if (flag) {
1042 rec->flags |= FTRACE_FL_ENABLED;
1043 return ftrace_make_call(rec, ftrace_addr);
1044 }
1045
1046 rec->flags &= ~FTRACE_FL_ENABLED;
1047 return ftrace_make_nop(NULL, rec, ftrace_addr);
1048 }
1049
1050 static void ftrace_replace_code(int enable)
1051 {
1052 struct dyn_ftrace *rec;
1053 struct ftrace_page *pg;
1054 int failed;
1055
1056 do_for_each_ftrace_rec(pg, rec) {
1057 /*
1058 * Skip over free records, records that have failed,
1059 * and records that have not been converted.
1060 */
1061 if (rec->flags & FTRACE_FL_FREE ||
1062 rec->flags & FTRACE_FL_FAILED ||
1063 !(rec->flags & FTRACE_FL_CONVERTED))
1064 continue;
1065
1066 /* ignore updates to this record's mcount site */
1067 if (get_kprobe((void *)rec->ip)) {
1068 freeze_record(rec);
1069 continue;
1070 } else {
1071 unfreeze_record(rec);
1072 }
1073
1074 failed = __ftrace_replace_code(rec, enable);
1075 if (failed) {
1076 rec->flags |= FTRACE_FL_FAILED;
1077 if ((system_state == SYSTEM_BOOTING) ||
1078 !core_kernel_text(rec->ip)) {
1079 ftrace_free_rec(rec);
1080 } else {
1081 ftrace_bug(failed, rec->ip);
1082 /* Stop processing */
1083 return;
1084 }
1085 }
1086 } while_for_each_ftrace_rec();
1087 }
1088
1089 static int
1090 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1091 {
1092 unsigned long ip;
1093 int ret;
1094
1095 ip = rec->ip;
1096
1097 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1098 if (ret) {
1099 ftrace_bug(ret, ip);
1100 rec->flags |= FTRACE_FL_FAILED;
1101 return 0;
1102 }
1103 return 1;
1104 }
1105
1106 /*
1107 * archs can override this function if they must do something
1108 * before the modifying code is performed.
1109 */
1110 int __weak ftrace_arch_code_modify_prepare(void)
1111 {
1112 return 0;
1113 }
1114
1115 /*
1116 * archs can override this function if they must do something
1117 * after the modifying code is performed.
1118 */
1119 int __weak ftrace_arch_code_modify_post_process(void)
1120 {
1121 return 0;
1122 }
1123
1124 static int __ftrace_modify_code(void *data)
1125 {
1126 int *command = data;
1127
1128 if (*command & FTRACE_ENABLE_CALLS)
1129 ftrace_replace_code(1);
1130 else if (*command & FTRACE_DISABLE_CALLS)
1131 ftrace_replace_code(0);
1132
1133 if (*command & FTRACE_UPDATE_TRACE_FUNC)
1134 ftrace_update_ftrace_func(ftrace_trace_function);
1135
1136 if (*command & FTRACE_START_FUNC_RET)
1137 ftrace_enable_ftrace_graph_caller();
1138 else if (*command & FTRACE_STOP_FUNC_RET)
1139 ftrace_disable_ftrace_graph_caller();
1140
1141 return 0;
1142 }
1143
1144 static void ftrace_run_update_code(int command)
1145 {
1146 int ret;
1147
1148 ret = ftrace_arch_code_modify_prepare();
1149 FTRACE_WARN_ON(ret);
1150 if (ret)
1151 return;
1152
1153 stop_machine(__ftrace_modify_code, &command, NULL);
1154
1155 ret = ftrace_arch_code_modify_post_process();
1156 FTRACE_WARN_ON(ret);
1157 }
1158
1159 static ftrace_func_t saved_ftrace_func;
1160 static int ftrace_start_up;
1161
1162 static void ftrace_startup_enable(int command)
1163 {
1164 if (saved_ftrace_func != ftrace_trace_function) {
1165 saved_ftrace_func = ftrace_trace_function;
1166 command |= FTRACE_UPDATE_TRACE_FUNC;
1167 }
1168
1169 if (!command || !ftrace_enabled)
1170 return;
1171
1172 ftrace_run_update_code(command);
1173 }
1174
1175 static void ftrace_startup(int command)
1176 {
1177 if (unlikely(ftrace_disabled))
1178 return;
1179
1180 ftrace_start_up++;
1181 command |= FTRACE_ENABLE_CALLS;
1182
1183 ftrace_startup_enable(command);
1184 }
1185
1186 static void ftrace_shutdown(int command)
1187 {
1188 if (unlikely(ftrace_disabled))
1189 return;
1190
1191 ftrace_start_up--;
1192 /*
1193 * Just warn in case of imbalance; no need to kill ftrace, it's not
1194 * critical, but the ftrace_call callers may never be nopped again
1195 * after further ftrace uses.
1196 */
1197 WARN_ON_ONCE(ftrace_start_up < 0);
1198
1199 if (!ftrace_start_up)
1200 command |= FTRACE_DISABLE_CALLS;
1201
1202 if (saved_ftrace_func != ftrace_trace_function) {
1203 saved_ftrace_func = ftrace_trace_function;
1204 command |= FTRACE_UPDATE_TRACE_FUNC;
1205 }
1206
1207 if (!command || !ftrace_enabled)
1208 return;
1209
1210 ftrace_run_update_code(command);
1211 }
1212
1213 static void ftrace_startup_sysctl(void)
1214 {
1215 int command = FTRACE_ENABLE_MCOUNT;
1216
1217 if (unlikely(ftrace_disabled))
1218 return;
1219
1220 /* Force update next time */
1221 saved_ftrace_func = NULL;
1222 /* ftrace_start_up is true if we want ftrace running */
1223 if (ftrace_start_up)
1224 command |= FTRACE_ENABLE_CALLS;
1225
1226 ftrace_run_update_code(command);
1227 }
1228
1229 static void ftrace_shutdown_sysctl(void)
1230 {
1231 int command = FTRACE_DISABLE_MCOUNT;
1232
1233 if (unlikely(ftrace_disabled))
1234 return;
1235
1236 /* ftrace_start_up is true if ftrace is running */
1237 if (ftrace_start_up)
1238 command |= FTRACE_DISABLE_CALLS;
1239
1240 ftrace_run_update_code(command);
1241 }
1242
1243 static cycle_t ftrace_update_time;
1244 static unsigned long ftrace_update_cnt;
1245 unsigned long ftrace_update_tot_cnt;
1246
1247 static int ftrace_update_code(struct module *mod)
1248 {
1249 struct dyn_ftrace *p;
1250 cycle_t start, stop;
1251
1252 start = ftrace_now(raw_smp_processor_id());
1253 ftrace_update_cnt = 0;
1254
1255 while (ftrace_new_addrs) {
1256
1257 /* If something went wrong, bail without enabling anything */
1258 if (unlikely(ftrace_disabled))
1259 return -1;
1260
1261 p = ftrace_new_addrs;
1262 ftrace_new_addrs = p->newlist;
1263 p->flags = 0L;
1264
1265 /* convert record (i.e., patch the mcount call with a NOP) */
1266 if (ftrace_code_disable(mod, p)) {
1267 p->flags |= FTRACE_FL_CONVERTED;
1268 ftrace_update_cnt++;
1269 } else
1270 ftrace_free_rec(p);
1271 }
1272
1273 stop = ftrace_now(raw_smp_processor_id());
1274 ftrace_update_time = stop - start;
1275 ftrace_update_tot_cnt += ftrace_update_cnt;
1276
1277 return 0;
1278 }
1279
1280 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1281 {
1282 struct ftrace_page *pg;
1283 int cnt;
1284 int i;
1285
1286 /* allocate a few pages */
1287 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1288 if (!ftrace_pages_start)
1289 return -1;
1290
1291 /*
1292 * Allocate a few more pages.
1293 *
1294 * TODO: have some parser search vmlinux before
1295 * final linking to find all calls to ftrace.
1296 * Then we can:
1297 * a) know how many pages to allocate.
1298 * and/or
1299 * b) set up the table then.
1300 *
1301 * The dynamic code is still necessary for
1302 * modules.
1303 */
1304
1305 pg = ftrace_pages = ftrace_pages_start;
1306
1307 cnt = num_to_init / ENTRIES_PER_PAGE;
1308 pr_info("ftrace: allocating %ld entries in %d pages\n",
1309 num_to_init, cnt + 1);
1310
1311 for (i = 0; i < cnt; i++) {
1312 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1313
1314 /* If we fail, we'll try later anyway */
1315 if (!pg->next)
1316 break;
1317
1318 pg = pg->next;
1319 }
1320
1321 return 0;
1322 }
1323
1324 enum {
1325 FTRACE_ITER_FILTER = (1 << 0),
1326 FTRACE_ITER_NOTRACE = (1 << 1),
1327 FTRACE_ITER_FAILURES = (1 << 2),
1328 FTRACE_ITER_PRINTALL = (1 << 3),
1329 FTRACE_ITER_HASH = (1 << 4),
1330 };
1331
1332 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1333
1334 struct ftrace_iterator {
1335 struct ftrace_page *pg;
1336 int hidx;
1337 int idx;
1338 unsigned flags;
1339 struct trace_parser parser;
1340 };
1341
1342 static void *
1343 t_hash_next(struct seq_file *m, void *v, loff_t *pos)
1344 {
1345 struct ftrace_iterator *iter = m->private;
1346 struct hlist_node *hnd = v;
1347 struct hlist_head *hhd;
1348
1349 WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
1350
1351 (*pos)++;
1352
1353 retry:
1354 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1355 return NULL;
1356
1357 hhd = &ftrace_func_hash[iter->hidx];
1358
1359 if (hlist_empty(hhd)) {
1360 iter->hidx++;
1361 hnd = NULL;
1362 goto retry;
1363 }
1364
1365 if (!hnd)
1366 hnd = hhd->first;
1367 else {
1368 hnd = hnd->next;
1369 if (!hnd) {
1370 iter->hidx++;
1371 goto retry;
1372 }
1373 }
1374
1375 return hnd;
1376 }
1377
1378 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1379 {
1380 struct ftrace_iterator *iter = m->private;
1381 void *p = NULL;
1382 loff_t l;
1383
1384 if (!(iter->flags & FTRACE_ITER_HASH))
1385 *pos = 0;
1386
1387 iter->flags |= FTRACE_ITER_HASH;
1388
1389 iter->hidx = 0;
1390 for (l = 0; l <= *pos; ) {
1391 p = t_hash_next(m, p, &l);
1392 if (!p)
1393 break;
1394 }
1395 return p;
1396 }
1397
1398 static int t_hash_show(struct seq_file *m, void *v)
1399 {
1400 struct ftrace_func_probe *rec;
1401 struct hlist_node *hnd = v;
1402
1403 rec = hlist_entry(hnd, struct ftrace_func_probe, node);
1404
1405 if (rec->ops->print)
1406 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1407
1408 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1409
1410 if (rec->data)
1411 seq_printf(m, ":%p", rec->data);
1412 seq_putc(m, '\n');
1413
1414 return 0;
1415 }
1416
1417 static void *
1418 t_next(struct seq_file *m, void *v, loff_t *pos)
1419 {
1420 struct ftrace_iterator *iter = m->private;
1421 struct dyn_ftrace *rec = NULL;
1422
1423 if (iter->flags & FTRACE_ITER_HASH)
1424 return t_hash_next(m, v, pos);
1425
1426 (*pos)++;
1427
1428 if (iter->flags & FTRACE_ITER_PRINTALL)
1429 return NULL;
1430
1431 retry:
1432 if (iter->idx >= iter->pg->index) {
1433 if (iter->pg->next) {
1434 iter->pg = iter->pg->next;
1435 iter->idx = 0;
1436 goto retry;
1437 }
1438 } else {
1439 rec = &iter->pg->records[iter->idx++];
1440 if ((rec->flags & FTRACE_FL_FREE) ||
1441
1442 (!(iter->flags & FTRACE_ITER_FAILURES) &&
1443 (rec->flags & FTRACE_FL_FAILED)) ||
1444
1445 ((iter->flags & FTRACE_ITER_FAILURES) &&
1446 !(rec->flags & FTRACE_FL_FAILED)) ||
1447
1448 ((iter->flags & FTRACE_ITER_FILTER) &&
1449 !(rec->flags & FTRACE_FL_FILTER)) ||
1450
1451 ((iter->flags & FTRACE_ITER_NOTRACE) &&
1452 !(rec->flags & FTRACE_FL_NOTRACE))) {
1453 rec = NULL;
1454 goto retry;
1455 }
1456 }
1457
1458 return rec;
1459 }
1460
1461 static void *t_start(struct seq_file *m, loff_t *pos)
1462 {
1463 struct ftrace_iterator *iter = m->private;
1464 void *p = NULL;
1465 loff_t l;
1466
1467 mutex_lock(&ftrace_lock);
1468 /*
1469 * For set_ftrace_filter reading, if we have the filter
1470 * off, we can short cut and just print out that all
1471 * functions are enabled.
1472 */
1473 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
1474 if (*pos > 0)
1475 return t_hash_start(m, pos);
1476 iter->flags |= FTRACE_ITER_PRINTALL;
1477 return iter;
1478 }
1479
1480 if (iter->flags & FTRACE_ITER_HASH)
1481 return t_hash_start(m, pos);
1482
1483 iter->pg = ftrace_pages_start;
1484 iter->idx = 0;
1485 for (l = 0; l <= *pos; ) {
1486 p = t_next(m, p, &l);
1487 if (!p)
1488 break;
1489 }
1490
1491 if (!p && iter->flags & FTRACE_ITER_FILTER)
1492 return t_hash_start(m, pos);
1493
1494 return p;
1495 }
1496
1497 static void t_stop(struct seq_file *m, void *p)
1498 {
1499 mutex_unlock(&ftrace_lock);
1500 }
1501
1502 static int t_show(struct seq_file *m, void *v)
1503 {
1504 struct ftrace_iterator *iter = m->private;
1505 struct dyn_ftrace *rec = v;
1506
1507 if (iter->flags & FTRACE_ITER_HASH)
1508 return t_hash_show(m, v);
1509
1510 if (iter->flags & FTRACE_ITER_PRINTALL) {
1511 seq_printf(m, "#### all functions enabled ####\n");
1512 return 0;
1513 }
1514
1515 if (!rec)
1516 return 0;
1517
1518 seq_printf(m, "%ps\n", (void *)rec->ip);
1519
1520 return 0;
1521 }
1522
1523 static const struct seq_operations show_ftrace_seq_ops = {
1524 .start = t_start,
1525 .next = t_next,
1526 .stop = t_stop,
1527 .show = t_show,
1528 };
1529
1530 static int
1531 ftrace_avail_open(struct inode *inode, struct file *file)
1532 {
1533 struct ftrace_iterator *iter;
1534 int ret;
1535
1536 if (unlikely(ftrace_disabled))
1537 return -ENODEV;
1538
1539 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1540 if (!iter)
1541 return -ENOMEM;
1542
1543 iter->pg = ftrace_pages_start;
1544
1545 ret = seq_open(file, &show_ftrace_seq_ops);
1546 if (!ret) {
1547 struct seq_file *m = file->private_data;
1548
1549 m->private = iter;
1550 } else {
1551 kfree(iter);
1552 }
1553
1554 return ret;
1555 }
1556
1557 static int
1558 ftrace_failures_open(struct inode *inode, struct file *file)
1559 {
1560 int ret;
1561 struct seq_file *m;
1562 struct ftrace_iterator *iter;
1563
1564 ret = ftrace_avail_open(inode, file);
1565 if (!ret) {
1566 m = (struct seq_file *)file->private_data;
1567 iter = (struct ftrace_iterator *)m->private;
1568 iter->flags = FTRACE_ITER_FAILURES;
1569 }
1570
1571 return ret;
1572 }
1573
1574
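/*
 * These open routines back the seq_file based debugfs entries created
 * later in this file, conventionally read as:
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	cat /sys/kernel/debug/tracing/failures
 *
 * The first lists every recorded mcount call site; the second lists only
 * the records flagged FTRACE_FL_FAILED.
 */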
1575 static void ftrace_filter_reset(int enable)
1576 {
1577 struct ftrace_page *pg;
1578 struct dyn_ftrace *rec;
1579 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1580
1581 mutex_lock(&ftrace_lock);
1582 if (enable)
1583 ftrace_filtered = 0;
1584 do_for_each_ftrace_rec(pg, rec) {
1585 if (rec->flags & FTRACE_FL_FAILED)
1586 continue;
1587 rec->flags &= ~type;
1588 } while_for_each_ftrace_rec();
1589 mutex_unlock(&ftrace_lock);
1590 }
1591
1592 static int
1593 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1594 {
1595 struct ftrace_iterator *iter;
1596 int ret = 0;
1597
1598 if (unlikely(ftrace_disabled))
1599 return -ENODEV;
1600
1601 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1602 if (!iter)
1603 return -ENOMEM;
1604
1605 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
1606 kfree(iter);
1607 return -ENOMEM;
1608 }
1609
1610 mutex_lock(&ftrace_regex_lock);
1611 if ((file->f_mode & FMODE_WRITE) &&
1612 (file->f_flags & O_TRUNC))
1613 ftrace_filter_reset(enable);
1614
1615 if (file->f_mode & FMODE_READ) {
1616 iter->pg = ftrace_pages_start;
1617 iter->flags = enable ? FTRACE_ITER_FILTER :
1618 FTRACE_ITER_NOTRACE;
1619
1620 ret = seq_open(file, &show_ftrace_seq_ops);
1621 if (!ret) {
1622 struct seq_file *m = file->private_data;
1623 m->private = iter;
1624 } else
1625 kfree(iter);
1626 } else
1627 file->private_data = iter;
1628 mutex_unlock(&ftrace_regex_lock);
1629
1630 return ret;
1631 }
1632
1633 static int
1634 ftrace_filter_open(struct inode *inode, struct file *file)
1635 {
1636 return ftrace_regex_open(inode, file, 1);
1637 }
1638
1639 static int
1640 ftrace_notrace_open(struct inode *inode, struct file *file)
1641 {
1642 return ftrace_regex_open(inode, file, 0);
1643 }
1644
1645 static loff_t
1646 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1647 {
1648 loff_t ret;
1649
1650 if (file->f_mode & FMODE_READ)
1651 ret = seq_lseek(file, offset, origin);
1652 else
1653 file->f_pos = ret = 1;
1654
1655 return ret;
1656 }
1657
1658 enum {
1659 MATCH_FULL,
1660 MATCH_FRONT_ONLY,
1661 MATCH_MIDDLE_ONLY,
1662 MATCH_END_ONLY,
1663 };
1664
1665 /*
1666 * (static function - no need for kernel doc)
1667 *
1668 * Pass in a buffer containing a glob and this function will
1669 * set search to point to the search part of the buffer and
1670 * return the type of search it is (see enum above).
1671 * This does modify buff.
1672 *
1673 * Returns enum type.
1674 * search returns the pointer to use for comparison.
1675 * not returns 1 if buff started with a '!'
1676 * 0 otherwise.
1677 */
1678 static int
1679 ftrace_setup_glob(char *buff, int len, char **search, int *not)
1680 {
1681 int type = MATCH_FULL;
1682 int i;
1683
1684 if (buff[0] == '!') {
1685 *not = 1;
1686 buff++;
1687 len--;
1688 } else
1689 *not = 0;
1690
1691 *search = buff;
1692
1693 for (i = 0; i < len; i++) {
1694 if (buff[i] == '*') {
1695 if (!i) {
1696 *search = buff + 1;
1697 type = MATCH_END_ONLY;
1698 } else {
1699 if (type == MATCH_END_ONLY)
1700 type = MATCH_MIDDLE_ONLY;
1701 else
1702 type = MATCH_FRONT_ONLY;
1703 buff[i] = 0;
1704 break;
1705 }
1706 }
1707 }
1708
1709 return type;
1710 }
1711
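/*
 * Examples of how ftrace_setup_glob() classifies a buffer (worked from
 * the loop above; note the buffer is modified in place):
 *
 *	"schedule"	-> MATCH_FULL,        search = "schedule"
 *	"schedule*"	-> MATCH_FRONT_ONLY,  search = "schedule"
 *	"*_init"	-> MATCH_END_ONLY,    search = "_init"
 *	"*lock*"	-> MATCH_MIDDLE_ONLY, search = "lock"
 *	"!sys_*"	-> MATCH_FRONT_ONLY,  search = "sys_", *not = 1
 */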
1712 static int ftrace_match(char *str, char *regex, int len, int type)
1713 {
1714 int matched = 0;
1715 char *ptr;
1716
1717 switch (type) {
1718 case MATCH_FULL:
1719 if (strcmp(str, regex) == 0)
1720 matched = 1;
1721 break;
1722 case MATCH_FRONT_ONLY:
1723 if (strncmp(str, regex, len) == 0)
1724 matched = 1;
1725 break;
1726 case MATCH_MIDDLE_ONLY:
1727 if (strstr(str, regex))
1728 matched = 1;
1729 break;
1730 case MATCH_END_ONLY:
1731 ptr = strstr(str, regex);
1732 if (ptr && (ptr[len] == 0))
1733 matched = 1;
1734 break;
1735 }
1736
1737 return matched;
1738 }
1739
1740 static int
1741 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1742 {
1743 char str[KSYM_SYMBOL_LEN];
1744
1745 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1746 return ftrace_match(str, regex, len, type);
1747 }
1748
1749 static void ftrace_match_records(char *buff, int len, int enable)
1750 {
1751 unsigned int search_len;
1752 struct ftrace_page *pg;
1753 struct dyn_ftrace *rec;
1754 unsigned long flag;
1755 char *search;
1756 int type;
1757 int not;
1758
1759 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1760 type = ftrace_setup_glob(buff, len, &search, &not);
1761
1762 search_len = strlen(search);
1763
1764 mutex_lock(&ftrace_lock);
1765 do_for_each_ftrace_rec(pg, rec) {
1766
1767 if (rec->flags & FTRACE_FL_FAILED)
1768 continue;
1769
1770 if (ftrace_match_record(rec, search, search_len, type)) {
1771 if (not)
1772 rec->flags &= ~flag;
1773 else
1774 rec->flags |= flag;
1775 }
1776 /*
1777 * Only enable filtering if we have a function that
1778 * is filtered on.
1779 */
1780 if (enable && (rec->flags & FTRACE_FL_FILTER))
1781 ftrace_filtered = 1;
1782 } while_for_each_ftrace_rec();
1783 mutex_unlock(&ftrace_lock);
1784 }
1785
1786 static int
1787 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1788 char *regex, int len, int type)
1789 {
1790 char str[KSYM_SYMBOL_LEN];
1791 char *modname;
1792
1793 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1794
1795 if (!modname || strcmp(modname, mod))
1796 return 0;
1797
1798 /* blank search means to match all funcs in the mod */
1799 if (len)
1800 return ftrace_match(str, regex, len, type);
1801 else
1802 return 1;
1803 }
1804
1805 static void ftrace_match_module_records(char *buff, char *mod, int enable)
1806 {
1807 unsigned search_len = 0;
1808 struct ftrace_page *pg;
1809 struct dyn_ftrace *rec;
1810 int type = MATCH_FULL;
1811 char *search = buff;
1812 unsigned long flag;
1813 int not = 0;
1814
1815 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1816
1817 /* blank or '*' mean the same */
1818 if (strcmp(buff, "*") == 0)
1819 buff[0] = 0;
1820
1821 /* handle the case of 'don't filter this module' */
1822 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1823 buff[0] = 0;
1824 not = 1;
1825 }
1826
1827 if (strlen(buff)) {
1828 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1829 search_len = strlen(search);
1830 }
1831
1832 mutex_lock(&ftrace_lock);
1833 do_for_each_ftrace_rec(pg, rec) {
1834
1835 if (rec->flags & FTRACE_FL_FAILED)
1836 continue;
1837
1838 if (ftrace_match_module_record(rec, mod,
1839 search, search_len, type)) {
1840 if (not)
1841 rec->flags &= ~flag;
1842 else
1843 rec->flags |= flag;
1844 }
1845 if (enable && (rec->flags & FTRACE_FL_FILTER))
1846 ftrace_filtered = 1;
1847
1848 } while_for_each_ftrace_rec();
1849 mutex_unlock(&ftrace_lock);
1850 }
1851
1852 /*
1853 * We register the module command as a template to show others how
1854 * to register a command as well.
1855 */
1856
1857 static int
1858 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1859 {
1860 char *mod;
1861
1862 /*
1863 * cmd == 'mod' because we only registered this func
1864 * for the 'mod' ftrace_func_command.
1865 * But if you register one func with multiple commands,
1866 * you can tell which command was used by the cmd
1867 * parameter.
1868 */
1869
1870 /* we must have a module name */
1871 if (!param)
1872 return -EINVAL;
1873
1874 mod = strsep(&param, ":");
1875 if (!strlen(mod))
1876 return -EINVAL;
1877
1878 ftrace_match_module_records(func, mod, enable);
1879 return 0;
1880 }
1881
1882 static struct ftrace_func_command ftrace_mod_cmd = {
1883 .name = "mod",
1884 .func = ftrace_mod_callback,
1885 };
1886
1887 static int __init ftrace_mod_cmd_init(void)
1888 {
1889 return register_ftrace_command(&ftrace_mod_cmd);
1890 }
1891 device_initcall(ftrace_mod_cmd_init);
1892
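/*
 * The "mod" command is driven through set_ftrace_filter/set_ftrace_notrace,
 * e.g. (module name and mount point here are examples):
 *
 *	echo '*:mod:ext4' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'ext4_*:mod:ext4' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * ftrace_process_regex() (later in this file) splits the input into
 * func:command:param and dispatches to ftrace_mod_callback() above.
 */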
1893 static void
1894 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1895 {
1896 struct ftrace_func_probe *entry;
1897 struct hlist_head *hhd;
1898 struct hlist_node *n;
1899 unsigned long key;
1900 int resched;
1901
1902 key = hash_long(ip, FTRACE_HASH_BITS);
1903
1904 hhd = &ftrace_func_hash[key];
1905
1906 if (hlist_empty(hhd))
1907 return;
1908
1909 /*
1910 * Disable preemption for these calls to prevent an RCU grace
1911 * period. This syncs the hash iteration and freeing of items
1912 * on the hash. rcu_read_lock is too dangerous here.
1913 */
1914 resched = ftrace_preempt_disable();
1915 hlist_for_each_entry_rcu(entry, n, hhd, node) {
1916 if (entry->ip == ip)
1917 entry->ops->func(ip, parent_ip, &entry->data);
1918 }
1919 ftrace_preempt_enable(resched);
1920 }
1921
1922 static struct ftrace_ops trace_probe_ops __read_mostly =
1923 {
1924 .func = function_trace_probe_call,
1925 };
1926
1927 static int ftrace_probe_registered;
1928
1929 static void __enable_ftrace_function_probe(void)
1930 {
1931 int i;
1932
1933 if (ftrace_probe_registered)
1934 return;
1935
1936 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1937 struct hlist_head *hhd = &ftrace_func_hash[i];
1938 if (hhd->first)
1939 break;
1940 }
1941 /* Nothing registered? */
1942 if (i == FTRACE_FUNC_HASHSIZE)
1943 return;
1944
1945 __register_ftrace_function(&trace_probe_ops);
1946 ftrace_startup(0);
1947 ftrace_probe_registered = 1;
1948 }
1949
1950 static void __disable_ftrace_function_probe(void)
1951 {
1952 int i;
1953
1954 if (!ftrace_probe_registered)
1955 return;
1956
1957 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1958 struct hlist_head *hhd = &ftrace_func_hash[i];
1959 if (hhd->first)
1960 return;
1961 }
1962
1963 /* no more funcs left */
1964 __unregister_ftrace_function(&trace_probe_ops);
1965 ftrace_shutdown(0);
1966 ftrace_probe_registered = 0;
1967 }
1968
1969
1970 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1971 {
1972 struct ftrace_func_probe *entry =
1973 container_of(rhp, struct ftrace_func_probe, rcu);
1974
1975 if (entry->ops->free)
1976 entry->ops->free(&entry->data);
1977 kfree(entry);
1978 }
1979
1980
1981 int
1982 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1983 void *data)
1984 {
1985 struct ftrace_func_probe *entry;
1986 struct ftrace_page *pg;
1987 struct dyn_ftrace *rec;
1988 int type, len, not;
1989 unsigned long key;
1990 int count = 0;
1991 char *search;
1992
1993 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1994 len = strlen(search);
1995
1996 /* we do not support '!' for function probes */
1997 if (WARN_ON(not))
1998 return -EINVAL;
1999
2000 mutex_lock(&ftrace_lock);
2001 do_for_each_ftrace_rec(pg, rec) {
2002
2003 if (rec->flags & FTRACE_FL_FAILED)
2004 continue;
2005
2006 if (!ftrace_match_record(rec, search, len, type))
2007 continue;
2008
2009 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2010 if (!entry) {
2011 /* If we did not process any, then return error */
2012 if (!count)
2013 count = -ENOMEM;
2014 goto out_unlock;
2015 }
2016
2017 count++;
2018
2019 entry->data = data;
2020
2021 /*
2022 * The caller might want to do something special
2023 * for each function we find. We call the callback
2024 * to give the caller an opportunity to do so.
2025 */
2026 if (ops->callback) {
2027 if (ops->callback(rec->ip, &entry->data) < 0) {
2028 /* caller does not like this func */
2029 kfree(entry);
2030 continue;
2031 }
2032 }
2033
2034 entry->ops = ops;
2035 entry->ip = rec->ip;
2036
2037 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2038 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2039
2040 } while_for_each_ftrace_rec();
2041 __enable_ftrace_function_probe();
2042
2043 out_unlock:
2044 mutex_unlock(&ftrace_lock);
2045
2046 return count;
2047 }
2048
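/*
 * Minimal caller sketch (hypothetical names; the handler signature follows
 * the way entry->ops->func() is invoked from function_trace_probe_call()
 * above):
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		... runs each time a matched function is hit ...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("schedule*", &my_probe_ops, NULL);
 */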
2049 enum {
2050 PROBE_TEST_FUNC = 1,
2051 PROBE_TEST_DATA = 2
2052 };
2053
2054 static void
2055 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2056 void *data, int flags)
2057 {
2058 struct ftrace_func_probe *entry;
2059 struct hlist_node *n, *tmp;
2060 char str[KSYM_SYMBOL_LEN];
2061 int type = MATCH_FULL;
2062 int i, len = 0;
2063 char *search;
2064
2065 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2066 glob = NULL;
2067 else if (glob) {
2068 int not;
2069
2070 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
2071 len = strlen(search);
2072
2073 /* we do not support '!' for function probes */
2074 if (WARN_ON(not))
2075 return;
2076 }
2077
2078 mutex_lock(&ftrace_lock);
2079 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2080 struct hlist_head *hhd = &ftrace_func_hash[i];
2081
2082 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2083
2084 /* break up if statements for readability */
2085 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2086 continue;
2087
2088 if ((flags & PROBE_TEST_DATA) && entry->data != data)
2089 continue;
2090
2091 /* do this last, since it is the most expensive */
2092 if (glob) {
2093 kallsyms_lookup(entry->ip, NULL, NULL,
2094 NULL, str);
2095 if (!ftrace_match(str, glob, len, type))
2096 continue;
2097 }
2098
2099 hlist_del(&entry->node);
2100 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2101 }
2102 }
2103 __disable_ftrace_function_probe();
2104 mutex_unlock(&ftrace_lock);
2105 }
2106
2107 void
2108 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2109 void *data)
2110 {
2111 __unregister_ftrace_function_probe(glob, ops, data,
2112 PROBE_TEST_FUNC | PROBE_TEST_DATA);
2113 }
2114
2115 void
2116 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2117 {
2118 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2119 }
2120
2121 void unregister_ftrace_function_probe_all(char *glob)
2122 {
2123 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2124 }
2125
2126 static LIST_HEAD(ftrace_commands);
2127 static DEFINE_MUTEX(ftrace_cmd_mutex);
2128
2129 int register_ftrace_command(struct ftrace_func_command *cmd)
2130 {
2131 struct ftrace_func_command *p;
2132 int ret = 0;
2133
2134 mutex_lock(&ftrace_cmd_mutex);
2135 list_for_each_entry(p, &ftrace_commands, list) {
2136 if (strcmp(cmd->name, p->name) == 0) {
2137 ret = -EBUSY;
2138 goto out_unlock;
2139 }
2140 }
2141 list_add(&cmd->list, &ftrace_commands);
2142 out_unlock:
2143 mutex_unlock(&ftrace_cmd_mutex);
2144
2145 return ret;
2146 }
2147
2148 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2149 {
2150 struct ftrace_func_command *p, *n;
2151 int ret = -ENODEV;
2152
2153 mutex_lock(&ftrace_cmd_mutex);
2154 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2155 if (strcmp(cmd->name, p->name) == 0) {
2156 ret = 0;
2157 list_del_init(&p->list);
2158 goto out_unlock;
2159 }
2160 }
2161 out_unlock:
2162 mutex_unlock(&ftrace_cmd_mutex);
2163
2164 return ret;
2165 }
2166
2167 static int ftrace_process_regex(char *buff, int len, int enable)
2168 {
2169 char *func, *command, *next = buff;
2170 struct ftrace_func_command *p;
2171 int ret = -EINVAL;
2172
2173 func = strsep(&next, ":");
2174
2175 if (!next) {
2176 ftrace_match_records(func, len, enable);
2177 return 0;
2178 }
2179
2180 /* command found */
2181
2182 command = strsep(&next, ":");
2183
2184 mutex_lock(&ftrace_cmd_mutex);
2185 list_for_each_entry(p, &ftrace_commands, list) {
2186 if (strcmp(p->name, command) == 0) {
2187 ret = p->func(func, command, next, enable);
2188 goto out_unlock;
2189 }
2190 }
2191 out_unlock:
2192 mutex_unlock(&ftrace_cmd_mutex);
2193
2194 return ret;
2195 }
2196
2197 static ssize_t
2198 ftrace_regex_write(struct file *file, const char __user *ubuf,
2199 size_t cnt, loff_t *ppos, int enable)
2200 {
2201 struct ftrace_iterator *iter;
2202 struct trace_parser *parser;
2203 ssize_t ret, read;
2204
2205 if (!cnt || cnt < 0)
2206 return 0;
2207
2208 mutex_lock(&ftrace_regex_lock);
2209
2210 if (file->f_mode & FMODE_READ) {
2211 struct seq_file *m = file->private_data;
2212 iter = m->private;
2213 } else
2214 iter = file->private_data;
2215
2216 parser = &iter->parser;
2217 read = trace_get_user(parser, ubuf, cnt, ppos);
2218
2219 if (trace_parser_loaded(parser) &&
2220 !trace_parser_cont(parser)) {
2221 ret = ftrace_process_regex(parser->buffer,
2222 parser->idx, enable);
2223 if (ret)
2224 goto out;
2225
2226 trace_parser_clear(parser);
2227 }
2228
2229 ret = read;
2230 out:
2231 mutex_unlock(&ftrace_regex_lock);
2232
2233 return ret;
2234 }
2235
2236 static ssize_t
2237 ftrace_filter_write(struct file *file, const char __user *ubuf,
2238 size_t cnt, loff_t *ppos)
2239 {
2240 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2241 }
2242
2243 static ssize_t
2244 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2245 size_t cnt, loff_t *ppos)
2246 {
2247 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2248 }
2249
2250 static void
2251 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2252 {
2253 if (unlikely(ftrace_disabled))
2254 return;
2255
2256 mutex_lock(&ftrace_regex_lock);
2257 if (reset)
2258 ftrace_filter_reset(enable);
2259 if (buf)
2260 ftrace_match_records(buf, len, enable);
2261 mutex_unlock(&ftrace_regex_lock);
2262 }
2263
2264 /**
2265 * ftrace_set_filter - set a function to filter on in ftrace
2266 * @buf - the string that holds the function filter text.
2267 * @len - the length of the string.
2268 * @reset - non zero to reset all filters before applying this filter.
2269 *
2270 * Filters denote which functions should be enabled when tracing is enabled.
2271 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2272 */
2273 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2274 {
2275 ftrace_set_regex(buf, len, reset, 1);
2276 }
2277
2278 /**
2279 * ftrace_set_notrace - set a function to not trace in ftrace
2280 * @buf - the string that holds the function notrace text.
2281 * @len - the length of the string.
2282 * @reset - non zero to reset all filters before applying this filter.
2283 *
2284 * Notrace Filters denote which functions should not be enabled when tracing
2285 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2286 * for tracing.
2287 */
2288 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2289 {
2290 ftrace_set_regex(buf, len, reset, 0);
2291 }
2292
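/*
 * In-kernel usage sketch (the buffer name is illustrative): a tracer that
 * only wants scheduler functions traced could do, at init time:
 *
 *	static unsigned char sched_filter[] = "sched*";
 *
 *	ftrace_set_filter(sched_filter, sizeof(sched_filter) - 1, 1);
 *
 * Passing reset == 1 clears any previously set filter first; a NULL @buf
 * with reset set re-enables tracing of all functions.
 */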
2293 /*
2294 * command line interface to allow users to set filters on boot up.
2295 */
2296 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
2297 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2298 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2299
2300 static int __init set_ftrace_notrace(char *str)
2301 {
2302 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2303 return 1;
2304 }
2305 __setup("ftrace_notrace=", set_ftrace_notrace);
2306
2307 static int __init set_ftrace_filter(char *str)
2308 {
2309 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2310 return 1;
2311 }
2312 __setup("ftrace_filter=", set_ftrace_filter);
2313
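/*
 * Boot-time example (the function names are examples); entries are comma
 * separated and handed to set_ftrace_early_filter() below:
 *
 *	ftrace_filter=schedule*,vfs_read ftrace_notrace=*spin_lock*
 */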
2314 static void __init set_ftrace_early_filter(char *buf, int enable)
2315 {
2316 char *func;
2317
2318 while (buf) {
2319 func = strsep(&buf, ",");
2320 ftrace_set_regex(func, strlen(func), 0, enable);
2321 }
2322 }
2323
2324 static void __init set_ftrace_early_filters(void)
2325 {
2326 if (ftrace_filter_buf[0])
2327 set_ftrace_early_filter(ftrace_filter_buf, 1);
2328 if (ftrace_notrace_buf[0])
2329 set_ftrace_early_filter(ftrace_notrace_buf, 0);
2330 }
2331
2332 static int
2333 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
2334 {
2335 struct seq_file *m = (struct seq_file *)file->private_data;
2336 struct ftrace_iterator *iter;
2337 struct trace_parser *parser;
2338
2339 mutex_lock(&ftrace_regex_lock);
2340 if (file->f_mode & FMODE_READ) {
2341 iter = m->private;
2342
2343 seq_release(inode, file);
2344 } else
2345 iter = file->private_data;
2346
2347 parser = &iter->parser;
2348 if (trace_parser_loaded(parser)) {
2349 parser->buffer[parser->idx] = 0;
2350 ftrace_match_records(parser->buffer, parser->idx, enable);
2351 }
2352
2353 mutex_lock(&ftrace_lock);
2354 if (ftrace_start_up && ftrace_enabled)
2355 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2356 mutex_unlock(&ftrace_lock);
2357
2358 trace_parser_put(parser);
2359 kfree(iter);
2360
2361 mutex_unlock(&ftrace_regex_lock);
2362 return 0;
2363 }
2364
2365 static int
2366 ftrace_filter_release(struct inode *inode, struct file *file)
2367 {
2368 return ftrace_regex_release(inode, file, 1);
2369 }
2370
2371 static int
2372 ftrace_notrace_release(struct inode *inode, struct file *file)
2373 {
2374 return ftrace_regex_release(inode, file, 0);
2375 }
2376
2377 static const struct file_operations ftrace_avail_fops = {
2378 .open = ftrace_avail_open,
2379 .read = seq_read,
2380 .llseek = seq_lseek,
2381 .release = seq_release_private,
2382 };
2383
2384 static const struct file_operations ftrace_failures_fops = {
2385 .open = ftrace_failures_open,
2386 .read = seq_read,
2387 .llseek = seq_lseek,
2388 .release = seq_release_private,
2389 };
2390
2391 static const struct file_operations ftrace_filter_fops = {
2392 .open = ftrace_filter_open,
2393 .read = seq_read,
2394 .write = ftrace_filter_write,
2395 .llseek = ftrace_regex_lseek,
2396 .release = ftrace_filter_release,
2397 };
2398
2399 static const struct file_operations ftrace_notrace_fops = {
2400 .open = ftrace_notrace_open,
2401 .read = seq_read,
2402 .write = ftrace_notrace_write,
2403 .llseek = ftrace_regex_lseek,
2404 .release = ftrace_notrace_release,
2405 };
2406
2407 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2408
2409 static DEFINE_MUTEX(graph_lock);
2410
2411 int ftrace_graph_count;
2412 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2413
2414 static void *
2415 __g_next(struct seq_file *m, loff_t *pos)
2416 {
2417 if (*pos >= ftrace_graph_count)
2418 return NULL;
2419 return &ftrace_graph_funcs[*pos];
2420 }
2421
2422 static void *
2423 g_next(struct seq_file *m, void *v, loff_t *pos)
2424 {
2425 (*pos)++;
2426 return __g_next(m, pos);
2427 }
2428
2429 static void *g_start(struct seq_file *m, loff_t *pos)
2430 {
2431 mutex_lock(&graph_lock);
2432
2433 /* Nothing is filtered; tell g_show to report that all functions are enabled */
2434 if (!ftrace_graph_count && !*pos)
2435 return (void *)1;
2436
2437 return __g_next(m, pos);
2438 }
2439
2440 static void g_stop(struct seq_file *m, void *p)
2441 {
2442 mutex_unlock(&graph_lock);
2443 }
2444
2445 static int g_show(struct seq_file *m, void *v)
2446 {
2447 unsigned long *ptr = v;
2448
2449 if (!ptr)
2450 return 0;
2451
2452 if (ptr == (unsigned long *)1) {
2453 seq_printf(m, "#### all functions enabled ####\n");
2454 return 0;
2455 }
2456
2457 seq_printf(m, "%ps\n", (void *)*ptr);
2458
2459 return 0;
2460 }
2461
2462 static const struct seq_operations ftrace_graph_seq_ops = {
2463 .start = g_start,
2464 .next = g_next,
2465 .stop = g_stop,
2466 .show = g_show,
2467 };
2468
2469 static int
2470 ftrace_graph_open(struct inode *inode, struct file *file)
2471 {
2472 int ret = 0;
2473
2474 if (unlikely(ftrace_disabled))
2475 return -ENODEV;
2476
2477 mutex_lock(&graph_lock);
2478 if ((file->f_mode & FMODE_WRITE) &&
2479 (file->f_flags & O_TRUNC)) {
2480 ftrace_graph_count = 0;
2481 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2482 }
2483 mutex_unlock(&graph_lock);
2484
2485 if (file->f_mode & FMODE_READ)
2486 ret = seq_open(file, &ftrace_graph_seq_ops);
2487
2488 return ret;
2489 }
2490
2491 static int
2492 ftrace_graph_release(struct inode *inode, struct file *file)
2493 {
2494 if (file->f_mode & FMODE_READ)
2495 seq_release(inode, file);
2496 return 0;
2497 }
2498
2499 static int
2500 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2501 {
2502 struct dyn_ftrace *rec;
2503 struct ftrace_page *pg;
2504 int search_len;
2505 int found = 0;
2506 int type, not;
2507 char *search;
2508 bool exists;
2509 int i;
2510
2511 if (ftrace_disabled)
2512 return -ENODEV;
2513
2514 /* decode regex */
2515 type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
2516 if (not)
2517 return -EINVAL;
2518
2519 search_len = strlen(search);
2520
2521 mutex_lock(&ftrace_lock);
2522 do_for_each_ftrace_rec(pg, rec) {
2523
2524 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2525 break;
2526
2527 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2528 continue;
2529
2530 if (ftrace_match_record(rec, search, search_len, type)) {
2531 /* ensure it is not already in the array */
2532 exists = false;
2533 for (i = 0; i < *idx; i++)
2534 if (array[i] == rec->ip) {
2535 exists = true;
2536 break;
2537 }
2538 if (!exists) {
2539 array[(*idx)++] = rec->ip;
2540 found = 1;
2541 }
2542 }
2543 } while_for_each_ftrace_rec();
2544
2545 mutex_unlock(&ftrace_lock);
2546
2547 return found ? 0 : -EINVAL;
2548 }
2549
2550 static ssize_t
2551 ftrace_graph_write(struct file *file, const char __user *ubuf,
2552 size_t cnt, loff_t *ppos)
2553 {
2554 struct trace_parser parser;
2555 size_t read = 0;
2556 ssize_t ret;
2557
2558 if (!cnt)
2559 return 0;
2560
2561 mutex_lock(&graph_lock);
2562
2563 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2564 ret = -EBUSY;
2565 goto out_unlock;
2566 }
2567
2568 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2569 ret = -ENOMEM;
2570 goto out_unlock;
2571 }
2572
2573 read = trace_get_user(&parser, ubuf, cnt, ppos);
2574
2575 if (trace_parser_loaded(&parser)) {
2576 parser.buffer[parser.idx] = 0;
2577
2578 /* we allow only one expression at a time */
2579 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2580 parser.buffer);
2581 if (ret)
2582 goto out_free;
2583 }
2584
2585 ret = read;
2586 out_free:
2587 trace_parser_put(&parser);
2588 out_unlock:
2589 mutex_unlock(&graph_lock);
2590 return ret;
2591 }
2592
2593 static const struct file_operations ftrace_graph_fops = {
2594 .open = ftrace_graph_open,
2595 .read = seq_read,
2596 .write = ftrace_graph_write,
2597 .release = ftrace_graph_release,
2598 };
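
/*
 * Usage sketch for the "set_graph_function" file backed by these operations
 * (assuming debugfs is mounted at /sys/kernel/debug with the tracing
 * directory below it): each write is parsed by ftrace_graph_write() and
 * resolved into ftrace_graph_funcs[] by ftrace_set_func(), one expression
 * per write:
 *
 *	echo do_fork  > /sys/kernel/debug/tracing/set_graph_function
 *	echo sys_read >> /sys/kernel/debug/tracing/set_graph_function
 *	cat /sys/kernel/debug/tracing/set_graph_function
 *
 * Opening the file with O_TRUNC (plain '>') clears the list first; the
 * function names are examples only.
 */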
2599 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2600
2601 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2602 {
2603
2604 trace_create_file("available_filter_functions", 0444,
2605 d_tracer, NULL, &ftrace_avail_fops);
2606
2607 trace_create_file("failures", 0444,
2608 d_tracer, NULL, &ftrace_failures_fops);
2609
2610 trace_create_file("set_ftrace_filter", 0644, d_tracer,
2611 NULL, &ftrace_filter_fops);
2612
2613 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
2614 NULL, &ftrace_notrace_fops);
2615
2616 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2617 trace_create_file("set_graph_function", 0444, d_tracer,
2618 NULL,
2619 &ftrace_graph_fops);
2620 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2621
2622 return 0;
2623 }
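
/*
 * Runtime usage sketch (assuming debugfs is mounted at /sys/kernel/debug and
 * d_tracer is its "tracing" directory): the files created above are the user
 * space interface to the filtering code in this file, e.g.:
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sched_*'     > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'sched_clock' > /sys/kernel/debug/tracing/set_ftrace_notrace
 *
 * Writes are handled by ftrace_regex_write() above; the globs are examples.
 */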
2624
2625 static int ftrace_convert_nops(struct module *mod,
2626 unsigned long *start,
2627 unsigned long *end)
2628 {
2629 unsigned long *p;
2630 unsigned long addr;
2631 unsigned long flags;
2632
2633 mutex_lock(&ftrace_lock);
2634 p = start;
2635 while (p < end) {
2636 addr = ftrace_call_adjust(*p++);
2637 /*
2638 * Some architecture linkers will pad between
2639 * the different mcount_loc sections of different
2640 * object files to satisfy alignments.
2641 * Skip any NULL pointers.
2642 */
2643 if (!addr)
2644 continue;
2645 ftrace_record_ip(addr);
2646 }
2647
2648 /* disable interrupts to prevent kstop machine */
2649 local_irq_save(flags);
2650 ftrace_update_code(mod);
2651 local_irq_restore(flags);
2652 mutex_unlock(&ftrace_lock);
2653
2654 return 0;
2655 }
2656
2657 #ifdef CONFIG_MODULES
2658 void ftrace_release(void *start, void *end)
2659 {
2660 struct dyn_ftrace *rec;
2661 struct ftrace_page *pg;
2662 unsigned long s = (unsigned long)start;
2663 unsigned long e = (unsigned long)end;
2664
2665 if (ftrace_disabled || !start || start == end)
2666 return;
2667
2668 mutex_lock(&ftrace_lock);
2669 do_for_each_ftrace_rec(pg, rec) {
2670 if ((rec->ip >= s) && (rec->ip < e)) {
2671 /*
2672 * rec->ip is changed in ftrace_free_rec()
2673 * It should not be between s and e if the record was freed.
2674 */
2675 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
2676 ftrace_free_rec(rec);
2677 }
2678 } while_for_each_ftrace_rec();
2679 mutex_unlock(&ftrace_lock);
2680 }
2681
2682 static void ftrace_init_module(struct module *mod,
2683 unsigned long *start, unsigned long *end)
2684 {
2685 if (ftrace_disabled || start == end)
2686 return;
2687 ftrace_convert_nops(mod, start, end);
2688 }
2689
2690 static int ftrace_module_notify(struct notifier_block *self,
2691 unsigned long val, void *data)
2692 {
2693 struct module *mod = data;
2694
2695 switch (val) {
2696 case MODULE_STATE_COMING:
2697 ftrace_init_module(mod, mod->ftrace_callsites,
2698 mod->ftrace_callsites +
2699 mod->num_ftrace_callsites);
2700 break;
2701 case MODULE_STATE_GOING:
2702 ftrace_release(mod->ftrace_callsites,
2703 mod->ftrace_callsites +
2704 mod->num_ftrace_callsites);
2705 break;
2706 }
2707
2708 return 0;
2709 }
2710 #else
2711 static int ftrace_module_notify(struct notifier_block *self,
2712 unsigned long val, void *data)
2713 {
2714 return 0;
2715 }
2716 #endif /* CONFIG_MODULES */
2717
2718 struct notifier_block ftrace_module_nb = {
2719 .notifier_call = ftrace_module_notify,
2720 .priority = 0,
2721 };
2722
2723 extern unsigned long __start_mcount_loc[];
2724 extern unsigned long __stop_mcount_loc[];
2725
2726 void __init ftrace_init(void)
2727 {
2728 unsigned long count, addr, flags;
2729 int ret;
2730
2731 /* Keep the ftrace pointer to the stub */
2732 addr = (unsigned long)ftrace_stub;
2733
2734 local_irq_save(flags);
2735 ftrace_dyn_arch_init(&addr);
2736 local_irq_restore(flags);
2737
2738 /* ftrace_dyn_arch_init places the return code in addr */
2739 if (addr)
2740 goto failed;
2741
2742 count = __stop_mcount_loc - __start_mcount_loc;
2743
2744 ret = ftrace_dyn_table_alloc(count);
2745 if (ret)
2746 goto failed;
2747
2748 last_ftrace_enabled = ftrace_enabled = 1;
2749
2750 ret = ftrace_convert_nops(NULL,
2751 __start_mcount_loc,
2752 __stop_mcount_loc);
2753
2754 ret = register_module_notifier(&ftrace_module_nb);
2755 if (ret)
2756 pr_warning("Failed to register trace ftrace module notifier\n");
2757
2758 set_ftrace_early_filters();
2759
2760 return;
2761 failed:
2762 ftrace_disabled = 1;
2763 }
2764
2765 #else
2766
2767 static int __init ftrace_nodyn_init(void)
2768 {
2769 ftrace_enabled = 1;
2770 return 0;
2771 }
2772 device_initcall(ftrace_nodyn_init);
2773
2774 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2775 static inline void ftrace_startup_enable(int command) { }
2776 /* Keep as macros so we do not need to define the commands */
2777 # define ftrace_startup(command) do { } while (0)
2778 # define ftrace_shutdown(command) do { } while (0)
2779 # define ftrace_startup_sysctl() do { } while (0)
2780 # define ftrace_shutdown_sysctl() do { } while (0)
2781 #endif /* CONFIG_DYNAMIC_FTRACE */
2782
2783 static ssize_t
2784 ftrace_pid_read(struct file *file, char __user *ubuf,
2785 size_t cnt, loff_t *ppos)
2786 {
2787 char buf[64];
2788 int r;
2789
2790 if (ftrace_pid_trace == ftrace_swapper_pid)
2791 r = sprintf(buf, "swapper tasks\n");
2792 else if (ftrace_pid_trace)
2793 r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
2794 else
2795 r = sprintf(buf, "no pid\n");
2796
2797 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2798 }
2799
2800 static void clear_ftrace_swapper(void)
2801 {
2802 struct task_struct *p;
2803 int cpu;
2804
2805 get_online_cpus();
2806 for_each_online_cpu(cpu) {
2807 p = idle_task(cpu);
2808 clear_tsk_trace_trace(p);
2809 }
2810 put_online_cpus();
2811 }
2812
2813 static void set_ftrace_swapper(void)
2814 {
2815 struct task_struct *p;
2816 int cpu;
2817
2818 get_online_cpus();
2819 for_each_online_cpu(cpu) {
2820 p = idle_task(cpu);
2821 set_tsk_trace_trace(p);
2822 }
2823 put_online_cpus();
2824 }
2825
2826 static void clear_ftrace_pid(struct pid *pid)
2827 {
2828 struct task_struct *p;
2829
2830 rcu_read_lock();
2831 do_each_pid_task(pid, PIDTYPE_PID, p) {
2832 clear_tsk_trace_trace(p);
2833 } while_each_pid_task(pid, PIDTYPE_PID, p);
2834 rcu_read_unlock();
2835
2836 put_pid(pid);
2837 }
2838
2839 static void set_ftrace_pid(struct pid *pid)
2840 {
2841 struct task_struct *p;
2842
2843 rcu_read_lock();
2844 do_each_pid_task(pid, PIDTYPE_PID, p) {
2845 set_tsk_trace_trace(p);
2846 } while_each_pid_task(pid, PIDTYPE_PID, p);
2847 rcu_read_unlock();
2848 }
2849
2850 static void clear_ftrace_pid_task(struct pid **pid)
2851 {
2852 if (*pid == ftrace_swapper_pid)
2853 clear_ftrace_swapper();
2854 else
2855 clear_ftrace_pid(*pid);
2856
2857 *pid = NULL;
2858 }
2859
2860 static void set_ftrace_pid_task(struct pid *pid)
2861 {
2862 if (pid == ftrace_swapper_pid)
2863 set_ftrace_swapper();
2864 else
2865 set_ftrace_pid(pid);
2866 }
2867
2868 static ssize_t
2869 ftrace_pid_write(struct file *filp, const char __user *ubuf,
2870 size_t cnt, loff_t *ppos)
2871 {
2872 struct pid *pid;
2873 char buf[64];
2874 long val;
2875 int ret;
2876
2877 if (cnt >= sizeof(buf))
2878 return -EINVAL;
2879
2880 if (copy_from_user(&buf, ubuf, cnt))
2881 return -EFAULT;
2882
2883 buf[cnt] = 0;
2884
2885 ret = strict_strtol(buf, 10, &val);
2886 if (ret < 0)
2887 return ret;
2888
2889 mutex_lock(&ftrace_lock);
2890 if (val < 0) {
2891 /* disable pid tracing */
2892 if (!ftrace_pid_trace)
2893 goto out;
2894
2895 clear_ftrace_pid_task(&ftrace_pid_trace);
2896
2897 } else {
2898 /* swapper task is special */
2899 if (!val) {
2900 pid = ftrace_swapper_pid;
2901 if (pid == ftrace_pid_trace)
2902 goto out;
2903 } else {
2904 pid = find_get_pid(val);
2905
2906 if (pid == ftrace_pid_trace) {
2907 put_pid(pid);
2908 goto out;
2909 }
2910 }
2911
2912 if (ftrace_pid_trace)
2913 clear_ftrace_pid_task(&ftrace_pid_trace);
2914
2915 if (!pid)
2916 goto out;
2917
2918 ftrace_pid_trace = pid;
2919
2920 set_ftrace_pid_task(ftrace_pid_trace);
2921 }
2922
2923 /* update the function call */
2924 ftrace_update_pid_func();
2925 ftrace_startup_enable(0);
2926
2927 out:
2928 mutex_unlock(&ftrace_lock);
2929
2930 return cnt;
2931 }
2932
2933 static const struct file_operations ftrace_pid_fops = {
2934 .read = ftrace_pid_read,
2935 .write = ftrace_pid_write,
2936 };
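
/*
 * Usage sketch (assuming the tracing debugfs directory): ftrace_pid_write()
 * above takes one decimal value per write. The pid below is an example.
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid   # trace only pid 1234
 *	echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid   # trace the idle (swapper) tasks
 *	echo -1   > /sys/kernel/debug/tracing/set_ftrace_pid   # disable pid filtering
 */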
2937
2938 static __init int ftrace_init_debugfs(void)
2939 {
2940 struct dentry *d_tracer;
2941
2942 d_tracer = tracing_init_dentry();
2943 if (!d_tracer)
2944 return 0;
2945
2946 ftrace_init_dyn_debugfs(d_tracer);
2947
2948 trace_create_file("set_ftrace_pid", 0644, d_tracer,
2949 NULL, &ftrace_pid_fops);
2950
2951 ftrace_profile_debugfs(d_tracer);
2952
2953 return 0;
2954 }
2955 fs_initcall(ftrace_init_debugfs);
2956
2957 /**
2958 * ftrace_kill - kill ftrace
2959 *
2960 * This function should be used by panic code. It stops ftrace
2961 * in a not-so-nice way: no locking and no clean shutdown, so it
2962 * is safe to call from atomic context such as a panic handler.
2963 */
2964 void ftrace_kill(void)
2965 {
2966 ftrace_disabled = 1;
2967 ftrace_enabled = 0;
2968 clear_ftrace_function();
2969 }
2970
2971 /**
2972 * register_ftrace_function - register a function for profiling
2973 * @ops - ops structure that holds the function for profiling.
2974 *
2975 * Register a function to be called by all functions in the
2976 * kernel.
2977 *
2978 * Note: @ops->func and all the functions it calls must be labeled
2979 * with "notrace", otherwise it will go into a
2980 * recursive loop.
2981 */
2982 int register_ftrace_function(struct ftrace_ops *ops)
2983 {
2984 int ret;
2985
2986 if (unlikely(ftrace_disabled))
2987 return -1;
2988
2989 mutex_lock(&ftrace_lock);
2990
2991 ret = __register_ftrace_function(ops);
2992 ftrace_startup(0);
2993
2994 mutex_unlock(&ftrace_lock);
2995 return ret;
2996 }
2997
2998 /**
2999 * unregister_ftrace_function - unregister a function for profiling.
3000 * @ops - ops structure that holds the function to unregister
3001 *
3002 * Unregister a function that was added to be called by ftrace profiling.
3003 */
3004 int unregister_ftrace_function(struct ftrace_ops *ops)
3005 {
3006 int ret;
3007
3008 mutex_lock(&ftrace_lock);
3009 ret = __unregister_ftrace_function(ops);
3010 ftrace_shutdown(0);
3011 mutex_unlock(&ftrace_lock);
3012
3013 return ret;
3014 }
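
/*
 * Usage sketch (not part of this file): a minimal ftrace_ops user. The
 * callback receives the traced function's address and its caller's address,
 * and must be marked notrace, as the comment above register_ftrace_function()
 * warns. my_trace_callback() and my_ops are hypothetical names.
 */
static void notrace my_trace_callback(unsigned long ip, unsigned long parent_ip)
{
	/* runs for every traced function: keep this path short and notrace */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func = my_trace_callback,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
}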
3015
3016 int
3017 ftrace_enable_sysctl(struct ctl_table *table, int write,
3018 struct file *file, void __user *buffer, size_t *lenp,
3019 loff_t *ppos)
3020 {
3021 int ret;
3022
3023 if (unlikely(ftrace_disabled))
3024 return -ENODEV;
3025
3026 mutex_lock(&ftrace_lock);
3027
3028 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
3029
3030 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3031 goto out;
3032
3033 last_ftrace_enabled = !!ftrace_enabled;
3034
3035 if (ftrace_enabled) {
3036
3037 ftrace_startup_sysctl();
3038
3039 /* we are starting ftrace again */
3040 if (ftrace_list != &ftrace_list_end) {
3041 if (ftrace_list->next == &ftrace_list_end)
3042 ftrace_trace_function = ftrace_list->func;
3043 else
3044 ftrace_trace_function = ftrace_list_func;
3045 }
3046
3047 } else {
3048 /* stopping ftrace calls (just send to ftrace_stub) */
3049 ftrace_trace_function = ftrace_stub;
3050
3051 ftrace_shutdown_sysctl();
3052 }
3053
3054 out:
3055 mutex_unlock(&ftrace_lock);
3056 return ret;
3057 }
3058
3059 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3060
3061 static int ftrace_graph_active;
3062 static struct notifier_block ftrace_suspend_notifier;
3063
3064 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3065 {
3066 return 0;
3067 }
3068
3069 /* The callbacks that hook a function */
3070 trace_func_graph_ret_t ftrace_graph_return =
3071 (trace_func_graph_ret_t)ftrace_stub;
3072 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3073
3074 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3075 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3076 {
3077 int i;
3078 int ret = 0;
3079 unsigned long flags;
3080 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3081 struct task_struct *g, *t;
3082
3083 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3084 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3085 * sizeof(struct ftrace_ret_stack),
3086 GFP_KERNEL);
3087 if (!ret_stack_list[i]) {
3088 start = 0;
3089 end = i;
3090 ret = -ENOMEM;
3091 goto free;
3092 }
3093 }
3094
3095 read_lock_irqsave(&tasklist_lock, flags);
3096 do_each_thread(g, t) {
3097 if (start == end) {
3098 ret = -EAGAIN;
3099 goto unlock;
3100 }
3101
3102 if (t->ret_stack == NULL) {
3103 atomic_set(&t->tracing_graph_pause, 0);
3104 atomic_set(&t->trace_overrun, 0);
3105 t->curr_ret_stack = -1;
3106 /* Make sure the tasks see the -1 first: */
3107 smp_wmb();
3108 t->ret_stack = ret_stack_list[start++];
3109 }
3110 } while_each_thread(g, t);
3111
3112 unlock:
3113 read_unlock_irqrestore(&tasklist_lock, flags);
3114 free:
3115 for (i = start; i < end; i++)
3116 kfree(ret_stack_list[i]);
3117 return ret;
3118 }
3119
3120 static void
3121 ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
3122 struct task_struct *next)
3123 {
3124 unsigned long long timestamp;
3125 int index;
3126
3127 /*
3128 * Does the user want to count the time a function was asleep?
3129 * If so, do not update the time stamps.
3130 */
3131 if (trace_flags & TRACE_ITER_SLEEP_TIME)
3132 return;
3133
3134 timestamp = trace_clock_local();
3135
3136 prev->ftrace_timestamp = timestamp;
3137
3138 /* only process tasks that we timestamped */
3139 if (!next->ftrace_timestamp)
3140 return;
3141
3142 /*
3143 * Update all the counters in next to make up for the
3144 * time next was sleeping.
3145 */
3146 timestamp -= next->ftrace_timestamp;
3147
3148 for (index = next->curr_ret_stack; index >= 0; index--)
3149 next->ret_stack[index].calltime += timestamp;
3150 }
3151
3152 /* Allocate a return stack for each task */
3153 static int start_graph_tracing(void)
3154 {
3155 struct ftrace_ret_stack **ret_stack_list;
3156 int ret, cpu;
3157
3158 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3159 sizeof(struct ftrace_ret_stack *),
3160 GFP_KERNEL);
3161
3162 if (!ret_stack_list)
3163 return -ENOMEM;
3164
3165 /* The cpu_boot init_task->ret_stack will never be freed */
3166 for_each_online_cpu(cpu) {
3167 if (!idle_task(cpu)->ret_stack)
3168 ftrace_graph_init_task(idle_task(cpu));
3169 }
3170
3171 do {
3172 ret = alloc_retstack_tasklist(ret_stack_list);
3173 } while (ret == -EAGAIN);
3174
3175 if (!ret) {
3176 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
3177 if (ret)
3178 pr_info("ftrace_graph: Couldn't activate tracepoint"
3179 " probe to kernel_sched_switch\n");
3180 }
3181
3182 kfree(ret_stack_list);
3183 return ret;
3184 }
3185
3186 /*
3187 * Hibernation protection.
3188 * The state of the current task is too unstable during
3189 * suspend/restore to disk. We want to protect against that.
3190 */
3191 static int
3192 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3193 void *unused)
3194 {
3195 switch (state) {
3196 case PM_HIBERNATION_PREPARE:
3197 pause_graph_tracing();
3198 break;
3199
3200 case PM_POST_HIBERNATION:
3201 unpause_graph_tracing();
3202 break;
3203 }
3204 return NOTIFY_DONE;
3205 }
3206
3207 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3208 trace_func_graph_ent_t entryfunc)
3209 {
3210 int ret = 0;
3211
3212 mutex_lock(&ftrace_lock);
3213
3214 /* we currently allow only one tracer registered at a time */
3215 if (ftrace_graph_active) {
3216 ret = -EBUSY;
3217 goto out;
3218 }
3219
3220 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3221 register_pm_notifier(&ftrace_suspend_notifier);
3222
3223 ftrace_graph_active++;
3224 ret = start_graph_tracing();
3225 if (ret) {
3226 ftrace_graph_active--;
3227 goto out;
3228 }
3229
3230 ftrace_graph_return = retfunc;
3231 ftrace_graph_entry = entryfunc;
3232
3233 ftrace_startup(FTRACE_START_FUNC_RET);
3234
3235 out:
3236 mutex_unlock(&ftrace_lock);
3237 return ret;
3238 }
3239
3240 void unregister_ftrace_graph(void)
3241 {
3242 mutex_lock(&ftrace_lock);
3243
3244 if (unlikely(!ftrace_graph_active))
3245 goto out;
3246
3247 ftrace_graph_active--;
3248 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
3249 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3250 ftrace_graph_entry = ftrace_graph_entry_stub;
3251 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
3252 unregister_pm_notifier(&ftrace_suspend_notifier);
3253
3254 out:
3255 mutex_unlock(&ftrace_lock);
3256 }
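
/*
 * Usage sketch (not part of this file): the function graph tracer registers
 * a pair of callbacks through register_ftrace_graph(). The names below are
 * hypothetical; a nonzero return from the entry callback asks for the
 * matching return callback to be installed.
 */
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* also hook this function's return */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* called when the traced function returns */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void __exit my_graph_exit(void)
{
	unregister_ftrace_graph();
}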
3257
3258 /* Allocate a return stack for newly created task */
3259 void ftrace_graph_init_task(struct task_struct *t)
3260 {
3261 /* Make sure we do not use the parent ret_stack */
3262 t->ret_stack = NULL;
3263
3264 if (ftrace_graph_active) {
3265 struct ftrace_ret_stack *ret_stack;
3266
3267 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3268 * sizeof(struct ftrace_ret_stack),
3269 GFP_KERNEL);
3270 if (!ret_stack)
3271 return;
3272 t->curr_ret_stack = -1;
3273 atomic_set(&t->tracing_graph_pause, 0);
3274 atomic_set(&t->trace_overrun, 0);
3275 t->ftrace_timestamp = 0;
3276 /* make curr_ret_stack visible before we add the ret_stack */
3277 smp_wmb();
3278 t->ret_stack = ret_stack;
3279 }
3280 }
3281
3282 void ftrace_graph_exit_task(struct task_struct *t)
3283 {
3284 struct ftrace_ret_stack *ret_stack = t->ret_stack;
3285
3286 t->ret_stack = NULL;
3287 /* NULL must become visible to IRQs before we free it: */
3288 barrier();
3289
3290 kfree(ret_stack);
3291 }
3292
3293 void ftrace_graph_stop(void)
3294 {
3295 ftrace_stop();
3296 }
3297 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3298