kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* By default, current tracing type is normal tracing. */
enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before other CPUs see the change.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * ops itself on the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
#else
		if (ops->next == &ftrace_list_end)
			__ftrace_trace_function = ops->func;
		else
			__ftrace_trace_function = ftrace_list_func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * it confused with a reference read from the code while we parse
 * the objcopy output of the text section. Use a variable for it
 * instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;
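/* Command bits for ftrace_run_update_code() */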
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);
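/*
 * dyn_ftrace records are carved out of whole pages: each ftrace_page
 * header is followed by as many records as fit in the rest of the page
 * (ENTRIES_PER_PAGE), and pages are chained through ->next.
 */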
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
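/*
 * Records whose mcount call site is covered by a kprobe are "frozen":
 * ftrace_replace_code() skips them so that it never rewrites an
 * instruction the probe currently owns.
 */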
static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */
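/*
 * Freed records are chained through their own ip field: rec->ip holds
 * the next free record and FTRACE_FL_FREE marks the entry as unused
 * until ftrace_alloc_dyn_node() recycles it.
 */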
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
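/*
 * Reconcile one record with the requested state and patch its call
 * site accordingly.  In short:
 *
 *  - FTRACE_FL_NOTRACE set:       the site is always disabled;
 *  - filtering active && enable:  only FTRACE_FL_FILTER records get
 *                                 enabled, enabled non-filter records
 *                                 are disabled;
 *  - otherwise:                   follow @enable directly.
 */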
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

#ifdef CONFIG_FUNCTION_RET_TRACER
	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
		ftrace_addr = (unsigned long)ftrace_caller;
	else
		ftrace_addr = (unsigned long)ftrace_return_caller;
#else
	ftrace_addr = (unsigned long)ftrace_caller;
#endif

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled, then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled, then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}
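/*
 * Modifying kernel text on a live SMP system is dangerous, so the
 * actual patching runs inside stop_machine(), with every other CPU
 * parked, while __ftrace_modify_code() dispatches on the command bits.
 */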
static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static DEFINE_MUTEX(ftrace_start_lock);
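/*
 * ftrace_start_up counts the users that want the mcount call sites
 * enabled.  ftrace_shutdown() only disables the call sites again once
 * this count drops back to zero.
 */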
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
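/*
 * Walk ftrace_new_addrs and turn each freshly recorded mcount call
 * site into a NOP, accounting how many records were converted and how
 * long the pass took.
 */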
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e., patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
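/* seq_file iterator that walks every dyn_ftrace record in ftrace_pages */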
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos > iter->pos)
		*pos = iter->pos;

	l = *pos;
	p = t_next(m, p, &l);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	ret = seq_printf(m, "%s\n", str);
	if (ret < 0) {
		iter->pos--;
		iter->idx--;
	}

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = 0;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = 0;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
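/*
 * Match function names against @buff.  Four pattern forms are
 * understood, mapping to the MATCH_* types above:  "func" matches
 * exactly, "func*" matches a prefix, "*func" matches a suffix and
 * "*func*" matches a substring.
 */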
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
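/*
 * Parse the user buffer one whitespace-separated token at a time and
 * feed each complete token to ftrace_match().  FTRACE_ITER_CONT marks
 * a token that continues into the next write() call.
 */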
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
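 *
 * A usage sketch ("schedule" stands in for any traceable function):
 *
 *	static unsigned char buf[] = "schedule";
 *
 *	ftrace_set_filter(buf, sizeof(buf) - 1, 1);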
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
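/*
 * Create the debugfs control files.  A typical session from user
 * space, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	echo 'sys_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 */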
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
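/*
 * Walk the table of mcount call-site addresses between @start and @end,
 * record each entry, then patch all recorded sites to NOPs in one pass.
 */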
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}
void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it takes no locks and does no
 * synchronization, which is also what makes it safe to call
 * from atomic and panic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 * with "notrace", otherwise it will go into a recursive loop.
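 *
 * A minimal usage sketch (the callback and ops names here are
 * hypothetical):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);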
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
		ret = -EBUSY;
		goto out;
	}

	ret = __register_ftrace_function(ops);
	ftrace_startup();

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_RET_TRACER

static atomic_t ftrace_retfunc_active;

/* The callback that hooks the return of a function */
trace_function_return_t ftrace_function_return =
			(trace_function_return_t)ftrace_stub;

/* Try to assign a return stack array to each of FTRACE_RETSTACK_ALLOC_SIZE tasks */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->ret_stack = ret_stack_list[start++];
			t->curr_ret_stack = -1;
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

 unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
 free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
/* Allocate a return stack for each task */
static int start_return_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
int register_ftrace_return(trace_function_return_t func)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	/*
	 * Don't launch return tracing if normal function
	 * tracing is already running.
	 */
	if (ftrace_trace_function != ftrace_stub) {
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&ftrace_retfunc_active);
	ret = start_return_tracing();
	if (ret) {
		atomic_dec(&ftrace_retfunc_active);
		goto out;
	}
	ftrace_tracing_type = FTRACE_TYPE_RETURN;
	ftrace_function_return = func;
	ftrace_startup();

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
void unregister_ftrace_return(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_retfunc_active);
	ftrace_function_return = (trace_function_return_t)ftrace_stub;
	ftrace_shutdown();
	/* Restore normal tracing type */
	ftrace_tracing_type = FTRACE_TYPE_ENTER;

	mutex_unlock(&ftrace_sysctl_lock);
}
/* Allocate a return stack for newly created task */
void ftrace_retfunc_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_retfunc_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}
void ftrace_retfunc_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
#endif