ftrace: improve seq_operation of ftrace
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* ftrace_pid_trace >= 0 will only trace threads with this pid */
static int ftrace_pid_trace = -1;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (current->pid != ftrace_pid_trace)
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between this call and the
 * moment the traced functions actually stop being called.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace >= 0) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace >= 0) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace >= 0) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func != ftrace_pid_func)
			goto out;

		set_ftrace_pid_function(func);
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * parse the objcopy output of the text section. Use a variable
 * for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
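/*
 * Editor's note, a worked example (illustrative, not from the original
 * source): with 4 KiB pages and a hypothetical 32-byte struct dyn_ftrace,
 * (4096 - sizeof(struct ftrace_page)) / 32 is roughly 127 records per
 * page, so the ~10000 boot-time entries below need on the order of
 * 80 pages.
 */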

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)		({ 0; })
# define unfreeze_record(rec)		({ 0; })
# define record_frozen(rec)		({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
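/*
 * Editor's note (not in the original source): freed records form a
 * singly linked free list threaded through rec->ip itself; a free
 * record's ip field holds the pointer to the next free record, which
 * is why ftrace_alloc_dyn_node() below restores the head with a cast:
 *
 *	rec = ftrace_free_records;
 *	ftrace_free_records = (void *)rec->ip;
 */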

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}


static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)ftrace_caller;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
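/*
 * Editor's note (not in the original source): the code patching above
 * runs under stop_machine(), so no other CPU can execute an mcount
 * call site while its instruction bytes are being rewritten;
 * __ftrace_modify_code() is effectively the only thing running when
 * the kernel text is modified.
 */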

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e., patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *  final linking to find all calls to ftrace.
	 *  Then we can:
	 *   a) know how many pages to allocate.
	 *      and/or
	 *   b) set up the table then.
	 *
	 * The dynamic code is still necessary for
	 * modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
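/*
 * Editor's note (not in the original source): the seq_file core calls
 * ->start once per read(), then ->next repeatedly, then ->stop. The
 * t_start() trick of decrementing *pos and iter->idx on resumed reads
 * lets t_next() re-fetch the record the previous read() stopped at,
 * so a long listing can be consumed across several reads:
 *
 *	p = t_start(m, &pos);		resumes where the last read ended
 *	while (p) {
 *		t_show(m, p);
 *		p = t_next(m, p, &pos);
 *	}
 *	t_stop(m, p);
 */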

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
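/*
 * Editor's note, illustrative examples (not in the original source) of
 * how ftrace_match() below maps a glob to a match type:
 *
 *	"schedule"	MATCH_FULL		exact symbol name
 *	"*lock"		MATCH_END_ONLY		symbol ends with "lock"
 *	"sched_*"	MATCH_FRONT_ONLY	symbol starts with "sched_"
 *	"*sched*"	MATCH_MIDDLE_ONLY	symbol contains "sched"
 */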

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)	/* cnt is a size_t; it can never be negative */
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
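/*
 * Editor's note, a usage sketch (not in the original source): a tracer
 * that only wants to see the scheduler core could do
 *
 *	char buf[] = "sched_*";
 *
 *	ftrace_set_filter(buf, strlen(buf), 1);
 *
 * The buffer must be writable, since ftrace_match() terminates the
 * glob in place at the '*'; the final argument resets any previously
 * set filters first.
 */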

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}
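/*
 * Editor's note, typical usage from a shell (illustrative, assuming
 * debugfs is mounted at /sys/kernel/debug and d_tracer is the
 * "tracing" directory):
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo kmalloc   > /sys/kernel/debug/tracing/set_ftrace_notrace
 */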

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace >= 0)
		r = sprintf(buf, "%u\n", ftrace_pid_trace);
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (ftrace_pid_trace < 0)
			goto out;
		ftrace_pid_trace = -1;

	} else {

		if (ftrace_pid_trace == val)
			goto out;

		ftrace_pid_trace = val;
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}

static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};
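/*
 * Editor's note, usage sketch (not in the original source): limit the
 * function tracer to a single task, then turn per-pid filtering back
 * off by writing a negative value:
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo -1   > /sys/kernel/debug/tracing/set_ftrace_pid
 */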

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
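/*
 * Editor's note, a registration sketch (not in the original source);
 * my_trace_func, do_something and my_ops are hypothetical names. The
 * callback must be notrace or it will recurse:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		do_something(ip, parent_ip);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */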

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry =
			(trace_func_graph_ent_t)ftrace_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->ret_stack = ret_stack_list[start++];
			t->curr_ret_stack = -1;
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
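/*
 * Editor's note, a hook-up sketch (not in the original source);
 * my_graph_return and my_graph_entry are hypothetical callbacks with
 * the trace_func_graph_ret_t/trace_func_graph_ent_t signatures:
 *
 *	ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 *	if (!ret) {
 *		...
 *		unregister_ftrace_graph();
 *	}
 */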

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
#endif