ftrace/x86: Add dynamic allocated trampoline for ftrace_ops
[deliverable/linux.git] / kernel / trace / ftrace.c
1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
14 */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35
36 #include <trace/events/sched.h>
37
38 #include <asm/setup.h>
39
40 #include "trace_output.h"
41 #include "trace_stat.h"
42
43 #define FTRACE_WARN_ON(cond) \
44 ({ \
45 int ___r = cond; \
46 if (WARN_ON(___r)) \
47 ftrace_kill(); \
48 ___r; \
49 })
50
51 #define FTRACE_WARN_ON_ONCE(cond) \
52 ({ \
53 int ___r = cond; \
54 if (WARN_ON_ONCE(___r)) \
55 ftrace_kill(); \
56 ___r; \
57 })
58
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
64
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
66
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_OPS_HASH(opsname) \
69 .func_hash = &opsname.local_hash, \
70 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
71 #define ASSIGN_OPS_HASH(opsname, val) \
72 .func_hash = val, \
73 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
74 #else
75 #define INIT_OPS_HASH(opsname)
76 #define ASSIGN_OPS_HASH(opsname, val)
77 #endif
78
79 static struct ftrace_ops ftrace_list_end __read_mostly = {
80 .func = ftrace_stub,
81 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
82 INIT_OPS_HASH(ftrace_list_end)
83 };
84
85 /* ftrace_enabled is a method to turn ftrace on or off */
86 int ftrace_enabled __read_mostly;
87 static int last_ftrace_enabled;
88
89 /* Current function tracing op */
90 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
91 /* What to set function_trace_op to */
92 static struct ftrace_ops *set_function_trace_op;
93
94 /* List for set_ftrace_pid's pids. */
95 LIST_HEAD(ftrace_pids);
96 struct ftrace_pid {
97 struct list_head list;
98 struct pid *pid;
99 };
100
101 /*
102 * ftrace_disabled is set when an anomaly is discovered.
103 * ftrace_disabled is much stronger than ftrace_enabled.
104 */
105 static int ftrace_disabled __read_mostly;
106
107 static DEFINE_MUTEX(ftrace_lock);
108
109 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
110 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
111 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
112 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
113 static struct ftrace_ops global_ops;
114 static struct ftrace_ops control_ops;
115
116 static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
117 struct ftrace_ops *op, struct pt_regs *regs);
118
119 #if ARCH_SUPPORTS_FTRACE_OPS
120 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
121 struct ftrace_ops *op, struct pt_regs *regs);
122 #else
123 /* See comment below, where ftrace_ops_list_func is defined */
124 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
125 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
126 #endif
127
128 /*
129 * Traverse the ftrace_global_list, invoking all entries. The reason that we
130 * can use rcu_dereference_raw_notrace() is that elements removed from this list
131 * are simply leaked, so there is no need to interact with a grace-period
132 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
133 * concurrent insertions into the ftrace_global_list.
134 *
135 * Silly Alpha and silly pointer-speculation compiler optimizations!
136 */
137 #define do_for_each_ftrace_op(op, list) \
138 op = rcu_dereference_raw_notrace(list); \
139 do
140
141 /*
142 * Optimized for just a single item in the list (as that is the normal case).
143 */
144 #define while_for_each_ftrace_op(op) \
145 while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
146 unlikely((op) != &ftrace_list_end))
147
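/*
 * A minimal usage sketch for the two iteration macros above, assuming a
 * hypothetical helper my_inspect_op() and a caller running with preemption
 * disabled (or under ftrace_lock), as the RCU comment above requires:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		my_inspect_op(op);
 *	} while_for_each_ftrace_op(op);
 *
 * Note there is no 'break'; leaving the walk early must be done with a
 * goto, as with do_for_each_ftrace_rec() further below.
 */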
148 static inline void ftrace_ops_init(struct ftrace_ops *ops)
149 {
150 #ifdef CONFIG_DYNAMIC_FTRACE
151 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
152 mutex_init(&ops->local_hash.regex_lock);
153 ops->func_hash = &ops->local_hash;
154 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
155 }
156 #endif
157 }
158
159 /**
160 * ftrace_nr_registered_ops - return number of ops registered
161 *
162 * Returns the number of ftrace_ops registered and tracing functions
163 */
164 int ftrace_nr_registered_ops(void)
165 {
166 struct ftrace_ops *ops;
167 int cnt = 0;
168
169 mutex_lock(&ftrace_lock);
170
171 for (ops = ftrace_ops_list;
172 ops != &ftrace_list_end; ops = ops->next)
173 cnt++;
174
175 mutex_unlock(&ftrace_lock);
176
177 return cnt;
178 }
179
180 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
181 struct ftrace_ops *op, struct pt_regs *regs)
182 {
183 if (!test_tsk_trace_trace(current))
184 return;
185
186 ftrace_pid_function(ip, parent_ip, op, regs);
187 }
188
189 static void set_ftrace_pid_function(ftrace_func_t func)
190 {
191 /* do not set ftrace_pid_function to itself! */
192 if (func != ftrace_pid_func)
193 ftrace_pid_function = func;
194 }
195
196 /**
197 * clear_ftrace_function - reset the ftrace function
198 *
199 * This NULLs the ftrace function and in essence stops
200 * tracing. There may be a lag before all CPUs stop calling the old function.
201 */
202 void clear_ftrace_function(void)
203 {
204 ftrace_trace_function = ftrace_stub;
205 ftrace_pid_function = ftrace_stub;
206 }
207
208 static void control_ops_disable_all(struct ftrace_ops *ops)
209 {
210 int cpu;
211
212 for_each_possible_cpu(cpu)
213 *per_cpu_ptr(ops->disabled, cpu) = 1;
214 }
215
216 static int control_ops_alloc(struct ftrace_ops *ops)
217 {
218 int __percpu *disabled;
219
220 disabled = alloc_percpu(int);
221 if (!disabled)
222 return -ENOMEM;
223
224 ops->disabled = disabled;
225 control_ops_disable_all(ops);
226 return 0;
227 }
228
229 static void ftrace_sync(struct work_struct *work)
230 {
231 /*
232 * This function is just a stub to implement a hard force
233 * of synchronize_sched(). This requires synchronizing
234 * tasks even in userspace and idle.
235 *
236 * Yes, function tracing is rude.
237 */
238 }
239
240 static void ftrace_sync_ipi(void *data)
241 {
242 /* Probably not needed, but do it anyway */
243 smp_rmb();
244 }
245
246 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
247 static void update_function_graph_func(void);
248 #else
249 static inline void update_function_graph_func(void) { }
250 #endif
251
252 static void update_ftrace_function(void)
253 {
254 ftrace_func_t func;
255
256 /*
257 * Prepare the ftrace_ops that the arch callback will use.
258 * If there's only one ftrace_ops registered, the ftrace_ops_list
259 * will point to the ops we want.
260 */
261 set_function_trace_op = ftrace_ops_list;
262
263 /* If there's no ftrace_ops registered, just call the stub function */
264 if (ftrace_ops_list == &ftrace_list_end) {
265 func = ftrace_stub;
266
267 /*
268 * If we are at the end of the list and this ops is
269 * recursion safe and not dynamic and the arch supports passing ops,
270 * then have the mcount trampoline call the function directly.
271 */
272 } else if (ftrace_ops_list->next == &ftrace_list_end) {
273 func = ftrace_ops_get_func(ftrace_ops_list);
274
275 } else {
276 /* Just use the default ftrace_ops */
277 set_function_trace_op = &ftrace_list_end;
278 func = ftrace_ops_list_func;
279 }
280
281 update_function_graph_func();
282
283 /* If there's no change, then do nothing more here */
284 if (ftrace_trace_function == func)
285 return;
286
287 /*
288 * If we are using the list function, it doesn't care
289 * about the function_trace_ops.
290 */
291 if (func == ftrace_ops_list_func) {
292 ftrace_trace_function = func;
293 /*
294 * Don't even bother setting function_trace_ops,
295 * it would be racy to do so anyway.
296 */
297 return;
298 }
299
300 #ifndef CONFIG_DYNAMIC_FTRACE
301 /*
302 * For static tracing, we need to be a bit more careful.
303 * The function change takes affect immediately. Thus,
304 * we need to coorditate the setting of the function_trace_ops
305 * with the setting of the ftrace_trace_function.
306 *
307 * Set the function to the list ops, which will call the
308 * function we want, albeit indirectly, but it handles the
309 * ftrace_ops and doesn't depend on function_trace_op.
310 */
311 ftrace_trace_function = ftrace_ops_list_func;
312 /*
313 * Make sure all CPUs see this. Yes this is slow, but static
314 * tracing is slow and nasty to have enabled.
315 */
316 schedule_on_each_cpu(ftrace_sync);
317 /* Now all cpus are using the list ops. */
318 function_trace_op = set_function_trace_op;
319 /* Make sure the function_trace_op is visible on all CPUs */
320 smp_wmb();
321 /* Nasty way to force a rmb on all cpus */
322 smp_call_function(ftrace_sync_ipi, NULL, 1);
323 /* OK, we are all set to update the ftrace_trace_function now! */
324 #endif /* !CONFIG_DYNAMIC_FTRACE */
325
326 ftrace_trace_function = func;
327 }
328
329 int using_ftrace_ops_list_func(void)
330 {
331 return ftrace_trace_function == ftrace_ops_list_func;
332 }
333
334 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
335 {
336 ops->next = *list;
337 /*
338 * We are entering ops into the list but another
339 * CPU might be walking that list. We need to make sure
340 * the ops->next pointer is valid before another CPU sees
341 * the ops pointer included into the list.
342 */
343 rcu_assign_pointer(*list, ops);
344 }
345
346 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
347 {
348 struct ftrace_ops **p;
349
350 /*
351 * If we are removing the last function, then simply point
352 * to the ftrace_stub.
353 */
354 if (*list == ops && ops->next == &ftrace_list_end) {
355 *list = &ftrace_list_end;
356 return 0;
357 }
358
359 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
360 if (*p == ops)
361 break;
362
363 if (*p != ops)
364 return -1;
365
366 *p = (*p)->next;
367 return 0;
368 }
369
370 static void add_ftrace_list_ops(struct ftrace_ops **list,
371 struct ftrace_ops *main_ops,
372 struct ftrace_ops *ops)
373 {
374 int first = *list == &ftrace_list_end;
375 add_ftrace_ops(list, ops);
376 if (first)
377 add_ftrace_ops(&ftrace_ops_list, main_ops);
378 }
379
380 static int remove_ftrace_list_ops(struct ftrace_ops **list,
381 struct ftrace_ops *main_ops,
382 struct ftrace_ops *ops)
383 {
384 int ret = remove_ftrace_ops(list, ops);
385 if (!ret && *list == &ftrace_list_end)
386 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
387 return ret;
388 }
389
390 static void ftrace_update_trampoline(struct ftrace_ops *ops);
391
392 static int __register_ftrace_function(struct ftrace_ops *ops)
393 {
394 if (ops->flags & FTRACE_OPS_FL_DELETED)
395 return -EINVAL;
396
397 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
398 return -EBUSY;
399
400 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
401 /*
402 * If the ftrace_ops specifies SAVE_REGS, then it can only be used
403 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
404 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
405 */
406 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
407 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
408 return -EINVAL;
409
410 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
411 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
412 #endif
413
414 if (!core_kernel_data((unsigned long)ops))
415 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
416
417 if (ops->flags & FTRACE_OPS_FL_CONTROL) {
418 if (control_ops_alloc(ops))
419 return -ENOMEM;
420 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
421 } else
422 add_ftrace_ops(&ftrace_ops_list, ops);
423
424 ftrace_update_trampoline(ops);
425
426 if (ftrace_enabled)
427 update_ftrace_function();
428
429 return 0;
430 }
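/*
 * A minimal sketch of the kind of ftrace_ops that reaches
 * __register_ftrace_function() above through the public
 * register_ftrace_function() API; my_callback() and my_ops are
 * hypothetical names:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// act on ip/parent_ip; must tolerate being called
 *		// from almost any context
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */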
431
432 static int __unregister_ftrace_function(struct ftrace_ops *ops)
433 {
434 int ret;
435
436 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
437 return -EBUSY;
438
439 if (ops->flags & FTRACE_OPS_FL_CONTROL) {
440 ret = remove_ftrace_list_ops(&ftrace_control_list,
441 &control_ops, ops);
442 } else
443 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
444
445 if (ret < 0)
446 return ret;
447
448 if (ftrace_enabled)
449 update_ftrace_function();
450
451 return 0;
452 }
453
454 static void ftrace_update_pid_func(void)
455 {
456 /* Only do something if we are tracing something */
457 if (ftrace_trace_function == ftrace_stub)
458 return;
459
460 update_ftrace_function();
461 }
462
463 #ifdef CONFIG_FUNCTION_PROFILER
464 struct ftrace_profile {
465 struct hlist_node node;
466 unsigned long ip;
467 unsigned long counter;
468 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
469 unsigned long long time;
470 unsigned long long time_squared;
471 #endif
472 };
473
474 struct ftrace_profile_page {
475 struct ftrace_profile_page *next;
476 unsigned long index;
477 struct ftrace_profile records[];
478 };
479
480 struct ftrace_profile_stat {
481 atomic_t disabled;
482 struct hlist_head *hash;
483 struct ftrace_profile_page *pages;
484 struct ftrace_profile_page *start;
485 struct tracer_stat stat;
486 };
487
488 #define PROFILE_RECORDS_SIZE \
489 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
490
491 #define PROFILES_PER_PAGE \
492 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
493
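/*
 * Rough worked numbers for the two macros above, assuming a 4096-byte
 * PAGE_SIZE and a struct ftrace_profile of about 48 bytes on a 64-bit
 * build with the graph tracer enabled:
 *
 *	PROFILE_RECORDS_SIZE ~ 4096 - 16 = 4080
 *	PROFILES_PER_PAGE    ~ 4080 / 48 = 85 records per page
 *
 * The exact values depend on the architecture and config; the macros
 * compute them at build time.
 */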
494 static int ftrace_profile_enabled __read_mostly;
495
496 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
497 static DEFINE_MUTEX(ftrace_profile_lock);
498
499 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
500
501 #define FTRACE_PROFILE_HASH_BITS 10
502 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
503
504 static void *
505 function_stat_next(void *v, int idx)
506 {
507 struct ftrace_profile *rec = v;
508 struct ftrace_profile_page *pg;
509
510 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
511
512 again:
513 if (idx != 0)
514 rec++;
515
516 if ((void *)rec >= (void *)&pg->records[pg->index]) {
517 pg = pg->next;
518 if (!pg)
519 return NULL;
520 rec = &pg->records[0];
521 if (!rec->counter)
522 goto again;
523 }
524
525 return rec;
526 }
527
528 static void *function_stat_start(struct tracer_stat *trace)
529 {
530 struct ftrace_profile_stat *stat =
531 container_of(trace, struct ftrace_profile_stat, stat);
532
533 if (!stat || !stat->start)
534 return NULL;
535
536 return function_stat_next(&stat->start->records[0], 0);
537 }
538
539 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
540 /* function graph compares on total time */
541 static int function_stat_cmp(void *p1, void *p2)
542 {
543 struct ftrace_profile *a = p1;
544 struct ftrace_profile *b = p2;
545
546 if (a->time < b->time)
547 return -1;
548 if (a->time > b->time)
549 return 1;
550 else
551 return 0;
552 }
553 #else
554 /* not function graph compares against hits */
555 static int function_stat_cmp(void *p1, void *p2)
556 {
557 struct ftrace_profile *a = p1;
558 struct ftrace_profile *b = p2;
559
560 if (a->counter < b->counter)
561 return -1;
562 if (a->counter > b->counter)
563 return 1;
564 else
565 return 0;
566 }
567 #endif
568
569 static int function_stat_headers(struct seq_file *m)
570 {
571 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
572 seq_printf(m, " Function "
573 "Hit Time Avg s^2\n"
574 " -------- "
575 "--- ---- --- ---\n");
576 #else
577 seq_printf(m, " Function Hit\n"
578 " -------- ---\n");
579 #endif
580 return 0;
581 }
582
583 static int function_stat_show(struct seq_file *m, void *v)
584 {
585 struct ftrace_profile *rec = v;
586 char str[KSYM_SYMBOL_LEN];
587 int ret = 0;
588 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
589 static struct trace_seq s;
590 unsigned long long avg;
591 unsigned long long stddev;
592 #endif
593 mutex_lock(&ftrace_profile_lock);
594
595 /* we raced with function_profile_reset() */
596 if (unlikely(rec->counter == 0)) {
597 ret = -EBUSY;
598 goto out;
599 }
600
601 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
602 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
603
604 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
605 seq_printf(m, " ");
606 avg = rec->time;
607 do_div(avg, rec->counter);
608
609 /* Sample standard deviation (s^2) */
610 if (rec->counter <= 1)
611 stddev = 0;
612 else {
613 /*
614 * Apply Welford's method:
615 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
616 */
617 stddev = rec->counter * rec->time_squared -
618 rec->time * rec->time;
619
620 /*
621 * Divide only 1000 for ns^2 -> us^2 conversion.
622 * trace_print_graph_duration will divide 1000 again.
623 */
624 do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
625 }
626
627 trace_seq_init(&s);
628 trace_print_graph_duration(rec->time, &s);
629 trace_seq_puts(&s, " ");
630 trace_print_graph_duration(avg, &s);
631 trace_seq_puts(&s, " ");
632 trace_print_graph_duration(stddev, &s);
633 trace_print_seq(m, &s);
634 #endif
635 seq_putc(m, '\n');
636 out:
637 mutex_unlock(&ftrace_profile_lock);
638
639 return ret;
640 }
641
642 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
643 {
644 struct ftrace_profile_page *pg;
645
646 pg = stat->pages = stat->start;
647
648 while (pg) {
649 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
650 pg->index = 0;
651 pg = pg->next;
652 }
653
654 memset(stat->hash, 0,
655 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
656 }
657
658 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
659 {
660 struct ftrace_profile_page *pg;
661 int functions;
662 int pages;
663 int i;
664
665 /* If we already allocated, do nothing */
666 if (stat->pages)
667 return 0;
668
669 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
670 if (!stat->pages)
671 return -ENOMEM;
672
673 #ifdef CONFIG_DYNAMIC_FTRACE
674 functions = ftrace_update_tot_cnt;
675 #else
676 /*
677 * We do not know the number of functions that exist because
678 * dynamic tracing is what counts them. From past experience
679 * we have around 20K functions. That should be more than enough.
680 * It is highly unlikely we will execute every function in
681 * the kernel.
682 */
683 functions = 20000;
684 #endif
685
686 pg = stat->start = stat->pages;
687
688 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
689
690 for (i = 1; i < pages; i++) {
691 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
692 if (!pg->next)
693 goto out_free;
694 pg = pg->next;
695 }
696
697 return 0;
698
699 out_free:
700 pg = stat->start;
701 while (pg) {
702 unsigned long tmp = (unsigned long)pg;
703
704 pg = pg->next;
705 free_page(tmp);
706 }
707
708 stat->pages = NULL;
709 stat->start = NULL;
710
711 return -ENOMEM;
712 }
713
714 static int ftrace_profile_init_cpu(int cpu)
715 {
716 struct ftrace_profile_stat *stat;
717 int size;
718
719 stat = &per_cpu(ftrace_profile_stats, cpu);
720
721 if (stat->hash) {
722 /* If the profile is already created, simply reset it */
723 ftrace_profile_reset(stat);
724 return 0;
725 }
726
727 /*
728 * We are profiling all functions, but usually only a few thousand
729 * functions are hit. We'll make a hash of 1024 items.
730 */
731 size = FTRACE_PROFILE_HASH_SIZE;
732
733 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
734
735 if (!stat->hash)
736 return -ENOMEM;
737
738 /* Preallocate the function profiling pages */
739 if (ftrace_profile_pages_init(stat) < 0) {
740 kfree(stat->hash);
741 stat->hash = NULL;
742 return -ENOMEM;
743 }
744
745 return 0;
746 }
747
748 static int ftrace_profile_init(void)
749 {
750 int cpu;
751 int ret = 0;
752
753 for_each_possible_cpu(cpu) {
754 ret = ftrace_profile_init_cpu(cpu);
755 if (ret)
756 break;
757 }
758
759 return ret;
760 }
761
762 /* interrupts must be disabled */
763 static struct ftrace_profile *
764 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
765 {
766 struct ftrace_profile *rec;
767 struct hlist_head *hhd;
768 unsigned long key;
769
770 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
771 hhd = &stat->hash[key];
772
773 if (hlist_empty(hhd))
774 return NULL;
775
776 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
777 if (rec->ip == ip)
778 return rec;
779 }
780
781 return NULL;
782 }
783
784 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
785 struct ftrace_profile *rec)
786 {
787 unsigned long key;
788
789 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
790 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
791 }
792
793 /*
794 * The memory is already allocated, this simply finds a new record to use.
795 */
796 static struct ftrace_profile *
797 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
798 {
799 struct ftrace_profile *rec = NULL;
800
801 /* prevent recursion (from NMIs) */
802 if (atomic_inc_return(&stat->disabled) != 1)
803 goto out;
804
805 /*
806 * Try to find the function again since an NMI
807 * could have added it
808 */
809 rec = ftrace_find_profiled_func(stat, ip);
810 if (rec)
811 goto out;
812
813 if (stat->pages->index == PROFILES_PER_PAGE) {
814 if (!stat->pages->next)
815 goto out;
816 stat->pages = stat->pages->next;
817 }
818
819 rec = &stat->pages->records[stat->pages->index++];
820 rec->ip = ip;
821 ftrace_add_profile(stat, rec);
822
823 out:
824 atomic_dec(&stat->disabled);
825
826 return rec;
827 }
828
829 static void
830 function_profile_call(unsigned long ip, unsigned long parent_ip,
831 struct ftrace_ops *ops, struct pt_regs *regs)
832 {
833 struct ftrace_profile_stat *stat;
834 struct ftrace_profile *rec;
835 unsigned long flags;
836
837 if (!ftrace_profile_enabled)
838 return;
839
840 local_irq_save(flags);
841
842 stat = this_cpu_ptr(&ftrace_profile_stats);
843 if (!stat->hash || !ftrace_profile_enabled)
844 goto out;
845
846 rec = ftrace_find_profiled_func(stat, ip);
847 if (!rec) {
848 rec = ftrace_profile_alloc(stat, ip);
849 if (!rec)
850 goto out;
851 }
852
853 rec->counter++;
854 out:
855 local_irq_restore(flags);
856 }
857
858 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
859 static int profile_graph_entry(struct ftrace_graph_ent *trace)
860 {
861 function_profile_call(trace->func, 0, NULL, NULL);
862 return 1;
863 }
864
865 static void profile_graph_return(struct ftrace_graph_ret *trace)
866 {
867 struct ftrace_profile_stat *stat;
868 unsigned long long calltime;
869 struct ftrace_profile *rec;
870 unsigned long flags;
871
872 local_irq_save(flags);
873 stat = this_cpu_ptr(&ftrace_profile_stats);
874 if (!stat->hash || !ftrace_profile_enabled)
875 goto out;
876
877 /* If the calltime was zeroed, ignore it */
878 if (!trace->calltime)
879 goto out;
880
881 calltime = trace->rettime - trace->calltime;
882
883 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
884 int index;
885
886 index = trace->depth;
887
888 /* Append this call time to the parent time to subtract */
889 if (index)
890 current->ret_stack[index - 1].subtime += calltime;
891
892 if (current->ret_stack[index].subtime < calltime)
893 calltime -= current->ret_stack[index].subtime;
894 else
895 calltime = 0;
896 }
897
898 rec = ftrace_find_profiled_func(stat, trace->func);
899 if (rec) {
900 rec->time += calltime;
901 rec->time_squared += calltime * calltime;
902 }
903
904 out:
905 local_irq_restore(flags);
906 }
907
908 static int register_ftrace_profiler(void)
909 {
910 return register_ftrace_graph(&profile_graph_return,
911 &profile_graph_entry);
912 }
913
914 static void unregister_ftrace_profiler(void)
915 {
916 unregister_ftrace_graph();
917 }
918 #else
919 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
920 .func = function_profile_call,
921 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
922 INIT_OPS_HASH(ftrace_profile_ops)
923 };
924
925 static int register_ftrace_profiler(void)
926 {
927 return register_ftrace_function(&ftrace_profile_ops);
928 }
929
930 static void unregister_ftrace_profiler(void)
931 {
932 unregister_ftrace_function(&ftrace_profile_ops);
933 }
934 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
935
936 static ssize_t
937 ftrace_profile_write(struct file *filp, const char __user *ubuf,
938 size_t cnt, loff_t *ppos)
939 {
940 unsigned long val;
941 int ret;
942
943 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
944 if (ret)
945 return ret;
946
947 val = !!val;
948
949 mutex_lock(&ftrace_profile_lock);
950 if (ftrace_profile_enabled ^ val) {
951 if (val) {
952 ret = ftrace_profile_init();
953 if (ret < 0) {
954 cnt = ret;
955 goto out;
956 }
957
958 ret = register_ftrace_profiler();
959 if (ret < 0) {
960 cnt = ret;
961 goto out;
962 }
963 ftrace_profile_enabled = 1;
964 } else {
965 ftrace_profile_enabled = 0;
966 /*
967 * unregister_ftrace_profiler calls stop_machine
968 * so this acts like a synchronize_sched.
969 */
970 unregister_ftrace_profiler();
971 }
972 }
973 out:
974 mutex_unlock(&ftrace_profile_lock);
975
976 *ppos += cnt;
977
978 return cnt;
979 }
980
981 static ssize_t
982 ftrace_profile_read(struct file *filp, char __user *ubuf,
983 size_t cnt, loff_t *ppos)
984 {
985 char buf[64]; /* big enough to hold a number */
986 int r;
987
988 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
989 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
990 }
991
992 static const struct file_operations ftrace_profile_fops = {
993 .open = tracing_open_generic,
994 .read = ftrace_profile_read,
995 .write = ftrace_profile_write,
996 .llseek = default_llseek,
997 };
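/*
 * Typical use of this interface from user space, assuming debugfs is
 * mounted at the usual /sys/kernel/debug location:
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 *	echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 *
 * The per-cpu "functionN" stat files are registered below in
 * ftrace_profile_debugfs().
 */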
998
999 /* used to initialize the real stat files */
1000 static struct tracer_stat function_stats __initdata = {
1001 .name = "functions",
1002 .stat_start = function_stat_start,
1003 .stat_next = function_stat_next,
1004 .stat_cmp = function_stat_cmp,
1005 .stat_headers = function_stat_headers,
1006 .stat_show = function_stat_show
1007 };
1008
1009 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1010 {
1011 struct ftrace_profile_stat *stat;
1012 struct dentry *entry;
1013 char *name;
1014 int ret;
1015 int cpu;
1016
1017 for_each_possible_cpu(cpu) {
1018 stat = &per_cpu(ftrace_profile_stats, cpu);
1019
1020 /* allocate enough for function name + cpu number */
1021 name = kmalloc(32, GFP_KERNEL);
1022 if (!name) {
1023 /*
1024 * The files created are permanent; if something goes wrong,
1025 * we still do not free the memory.
1026 */
1027 WARN(1,
1028 "Could not allocate stat file for cpu %d\n",
1029 cpu);
1030 return;
1031 }
1032 stat->stat = function_stats;
1033 snprintf(name, 32, "function%d", cpu);
1034 stat->stat.name = name;
1035 ret = register_stat_tracer(&stat->stat);
1036 if (ret) {
1037 WARN(1,
1038 "Could not register function stat for cpu %d\n",
1039 cpu);
1040 kfree(name);
1041 return;
1042 }
1043 }
1044
1045 entry = debugfs_create_file("function_profile_enabled", 0644,
1046 d_tracer, NULL, &ftrace_profile_fops);
1047 if (!entry)
1048 pr_warning("Could not create debugfs "
1049 "'function_profile_enabled' entry\n");
1050 }
1051
1052 #else /* CONFIG_FUNCTION_PROFILER */
1053 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1054 {
1055 }
1056 #endif /* CONFIG_FUNCTION_PROFILER */
1057
1058 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1059
1060 #ifdef CONFIG_DYNAMIC_FTRACE
1061
1062 static struct ftrace_ops *removed_ops;
1063
1064 /*
1065 * Set when doing a global update, like enabling all recs or disabling them.
1066 * It is not set when just updating a single ftrace_ops.
1067 */
1068 static bool update_all_ops;
1069
1070 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1071 # error Dynamic ftrace depends on MCOUNT_RECORD
1072 #endif
1073
1074 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1075
1076 struct ftrace_func_probe {
1077 struct hlist_node node;
1078 struct ftrace_probe_ops *ops;
1079 unsigned long flags;
1080 unsigned long ip;
1081 void *data;
1082 struct list_head free_list;
1083 };
1084
1085 struct ftrace_func_entry {
1086 struct hlist_node hlist;
1087 unsigned long ip;
1088 };
1089
1090 struct ftrace_hash {
1091 unsigned long size_bits;
1092 struct hlist_head *buckets;
1093 unsigned long count;
1094 struct rcu_head rcu;
1095 };
1096
1097 /*
1098 * We make these constant because no one should touch them,
1099 * but they are used as the default "empty hash", to avoid allocating
1100 * it all the time. These are in a read only section such that if
1101 * anyone does try to modify it, it will cause an exception.
1102 */
1103 static const struct hlist_head empty_buckets[1];
1104 static const struct ftrace_hash empty_hash = {
1105 .buckets = (struct hlist_head *)empty_buckets,
1106 };
1107 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1108
1109 static struct ftrace_ops global_ops = {
1110 .func = ftrace_stub,
1111 .local_hash.notrace_hash = EMPTY_HASH,
1112 .local_hash.filter_hash = EMPTY_HASH,
1113 INIT_OPS_HASH(global_ops)
1114 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
1115 FTRACE_OPS_FL_INITIALIZED,
1116 };
1117
1118 struct ftrace_page {
1119 struct ftrace_page *next;
1120 struct dyn_ftrace *records;
1121 int index;
1122 int size;
1123 };
1124
1125 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1126 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1127
1128 /* estimate from running different kernels */
1129 #define NR_TO_INIT 10000
1130
1131 static struct ftrace_page *ftrace_pages_start;
1132 static struct ftrace_page *ftrace_pages;
1133
1134 static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
1135 {
1136 return !hash || !hash->count;
1137 }
1138
1139 static struct ftrace_func_entry *
1140 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1141 {
1142 unsigned long key;
1143 struct ftrace_func_entry *entry;
1144 struct hlist_head *hhd;
1145
1146 if (ftrace_hash_empty(hash))
1147 return NULL;
1148
1149 if (hash->size_bits > 0)
1150 key = hash_long(ip, hash->size_bits);
1151 else
1152 key = 0;
1153
1154 hhd = &hash->buckets[key];
1155
1156 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1157 if (entry->ip == ip)
1158 return entry;
1159 }
1160 return NULL;
1161 }
1162
1163 static void __add_hash_entry(struct ftrace_hash *hash,
1164 struct ftrace_func_entry *entry)
1165 {
1166 struct hlist_head *hhd;
1167 unsigned long key;
1168
1169 if (hash->size_bits)
1170 key = hash_long(entry->ip, hash->size_bits);
1171 else
1172 key = 0;
1173
1174 hhd = &hash->buckets[key];
1175 hlist_add_head(&entry->hlist, hhd);
1176 hash->count++;
1177 }
1178
1179 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1180 {
1181 struct ftrace_func_entry *entry;
1182
1183 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1184 if (!entry)
1185 return -ENOMEM;
1186
1187 entry->ip = ip;
1188 __add_hash_entry(hash, entry);
1189
1190 return 0;
1191 }
1192
1193 static void
1194 free_hash_entry(struct ftrace_hash *hash,
1195 struct ftrace_func_entry *entry)
1196 {
1197 hlist_del(&entry->hlist);
1198 kfree(entry);
1199 hash->count--;
1200 }
1201
1202 static void
1203 remove_hash_entry(struct ftrace_hash *hash,
1204 struct ftrace_func_entry *entry)
1205 {
1206 hlist_del(&entry->hlist);
1207 hash->count--;
1208 }
1209
1210 static void ftrace_hash_clear(struct ftrace_hash *hash)
1211 {
1212 struct hlist_head *hhd;
1213 struct hlist_node *tn;
1214 struct ftrace_func_entry *entry;
1215 int size = 1 << hash->size_bits;
1216 int i;
1217
1218 if (!hash->count)
1219 return;
1220
1221 for (i = 0; i < size; i++) {
1222 hhd = &hash->buckets[i];
1223 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1224 free_hash_entry(hash, entry);
1225 }
1226 FTRACE_WARN_ON(hash->count);
1227 }
1228
1229 static void free_ftrace_hash(struct ftrace_hash *hash)
1230 {
1231 if (!hash || hash == EMPTY_HASH)
1232 return;
1233 ftrace_hash_clear(hash);
1234 kfree(hash->buckets);
1235 kfree(hash);
1236 }
1237
1238 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1239 {
1240 struct ftrace_hash *hash;
1241
1242 hash = container_of(rcu, struct ftrace_hash, rcu);
1243 free_ftrace_hash(hash);
1244 }
1245
1246 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1247 {
1248 if (!hash || hash == EMPTY_HASH)
1249 return;
1250 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1251 }
1252
1253 void ftrace_free_filter(struct ftrace_ops *ops)
1254 {
1255 ftrace_ops_init(ops);
1256 free_ftrace_hash(ops->func_hash->filter_hash);
1257 free_ftrace_hash(ops->func_hash->notrace_hash);
1258 }
1259
1260 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1261 {
1262 struct ftrace_hash *hash;
1263 int size;
1264
1265 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1266 if (!hash)
1267 return NULL;
1268
1269 size = 1 << size_bits;
1270 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1271
1272 if (!hash->buckets) {
1273 kfree(hash);
1274 return NULL;
1275 }
1276
1277 hash->size_bits = size_bits;
1278
1279 return hash;
1280 }
1281
1282 static struct ftrace_hash *
1283 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1284 {
1285 struct ftrace_func_entry *entry;
1286 struct ftrace_hash *new_hash;
1287 int size;
1288 int ret;
1289 int i;
1290
1291 new_hash = alloc_ftrace_hash(size_bits);
1292 if (!new_hash)
1293 return NULL;
1294
1295 /* Empty hash? */
1296 if (ftrace_hash_empty(hash))
1297 return new_hash;
1298
1299 size = 1 << hash->size_bits;
1300 for (i = 0; i < size; i++) {
1301 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1302 ret = add_hash_entry(new_hash, entry->ip);
1303 if (ret < 0)
1304 goto free_hash;
1305 }
1306 }
1307
1308 FTRACE_WARN_ON(new_hash->count != hash->count);
1309
1310 return new_hash;
1311
1312 free_hash:
1313 free_ftrace_hash(new_hash);
1314 return NULL;
1315 }
1316
1317 static void
1318 ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1319 static void
1320 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1321
1322 static int
1323 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1324 struct ftrace_hash **dst, struct ftrace_hash *src)
1325 {
1326 struct ftrace_func_entry *entry;
1327 struct hlist_node *tn;
1328 struct hlist_head *hhd;
1329 struct ftrace_hash *new_hash;
1330 int size = src->count;
1331 int bits = 0;
1332 int i;
1333
1334 /*
1335 * If the new source is empty, just free dst and assign it
1336 * the empty_hash.
1337 */
1338 if (!src->count) {
1339 new_hash = EMPTY_HASH;
1340 goto update;
1341 }
1342
1343 /*
1344 * Make the hash size about 1/2 the # found
1345 */
1346 for (size /= 2; size; size >>= 1)
1347 bits++;
1348
1349 /* Don't allocate too much */
1350 if (bits > FTRACE_HASH_MAX_BITS)
1351 bits = FTRACE_HASH_MAX_BITS;
1352
1353 new_hash = alloc_ftrace_hash(bits);
1354 if (!new_hash)
1355 return -ENOMEM;
1356
1357 size = 1 << src->size_bits;
1358 for (i = 0; i < size; i++) {
1359 hhd = &src->buckets[i];
1360 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1361 remove_hash_entry(src, entry);
1362 __add_hash_entry(new_hash, entry);
1363 }
1364 }
1365
1366 update:
1367 /*
1368 * Remove the current set, update the hash and add
1369 * them back.
1370 */
1371 ftrace_hash_rec_disable_modify(ops, enable);
1372
1373 rcu_assign_pointer(*dst, new_hash);
1374
1375 ftrace_hash_rec_enable_modify(ops, enable);
1376
1377 return 0;
1378 }
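/*
 * A worked example of the sizing loop in ftrace_hash_move() above:
 * with src->count == 300, size starts at 150 after the first halving
 * and is shifted right 8 times before reaching zero, so bits == 8 and
 * the new hash gets 256 buckets for roughly 300 entries.
 */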
1379
1380 static bool hash_contains_ip(unsigned long ip,
1381 struct ftrace_ops_hash *hash)
1382 {
1383 /*
1384 * The function record is a match if it exists in the filter
1385 * hash and not in the notrace hash. Note, an empty hash is
1386 * considered a match for the filter hash, but an empty
1387 * notrace hash is considered not in the notrace hash.
1388 */
1389 return (ftrace_hash_empty(hash->filter_hash) ||
1390 ftrace_lookup_ip(hash->filter_hash, ip)) &&
1391 (ftrace_hash_empty(hash->notrace_hash) ||
1392 !ftrace_lookup_ip(hash->notrace_hash, ip));
1393 }
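/*
 * A few concrete cases of the rule implemented above (F = filter_hash,
 * N = notrace_hash):
 *
 *	F empty,     N empty		-> every ip matches
 *	ip in F,     ip not in N	-> match
 *	ip in F,     ip in N		-> no match (notrace wins)
 *	ip not in F, F non-empty	-> no match
 */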
1394
1395 /*
1396 * Test the hashes for this ops to see if we want to call
1397 * the ops->func or not.
1398 *
1399 * It's a match if the ip is in the ops->filter_hash or
1400 * the filter_hash does not exist or is empty,
1401 * AND
1402 * the ip is not in the ops->notrace_hash.
1403 *
1404 * This needs to be called with preemption disabled as
1405 * the hashes are freed with call_rcu_sched().
1406 */
1407 static int
1408 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1409 {
1410 struct ftrace_ops_hash hash;
1411 int ret;
1412
1413 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1414 /*
1415 * There's a small race when adding ops in which the ftrace handler
1416 * that wants regs may be called without them. We cannot
1417 * allow that handler to be called if regs is NULL.
1418 */
1419 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1420 return 0;
1421 #endif
1422
1423 hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
1424 hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
1425
1426 if (hash_contains_ip(ip, &hash))
1427 ret = 1;
1428 else
1429 ret = 0;
1430
1431 return ret;
1432 }
1433
1434 /*
1435 * This is a double for loop. Do not use 'break' to break out of it;
1436 * you must use a goto.
1437 */
1438 #define do_for_each_ftrace_rec(pg, rec) \
1439 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1440 int _____i; \
1441 for (_____i = 0; _____i < pg->index; _____i++) { \
1442 rec = &pg->records[_____i];
1443
1444 #define while_for_each_ftrace_rec() \
1445 } \
1446 }
1447
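/*
 * A minimal sketch of walking all records with the macros above,
 * assuming ftrace_lock is held; for example, counting the records
 * that are currently enabled:
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *	int enabled = 0;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_ENABLED)
 *			enabled++;
 *	} while_for_each_ftrace_rec();
 */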
1448
1449 static int ftrace_cmp_recs(const void *a, const void *b)
1450 {
1451 const struct dyn_ftrace *key = a;
1452 const struct dyn_ftrace *rec = b;
1453
1454 if (key->flags < rec->ip)
1455 return -1;
1456 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1457 return 1;
1458 return 0;
1459 }
1460
1461 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1462 {
1463 struct ftrace_page *pg;
1464 struct dyn_ftrace *rec;
1465 struct dyn_ftrace key;
1466
1467 key.ip = start;
1468 key.flags = end; /* overload flags, as it is unsigned long */
1469
1470 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1471 if (end < pg->records[0].ip ||
1472 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1473 continue;
1474 rec = bsearch(&key, pg->records, pg->index,
1475 sizeof(struct dyn_ftrace),
1476 ftrace_cmp_recs);
1477 if (rec)
1478 return rec->ip;
1479 }
1480
1481 return 0;
1482 }
1483
1484 /**
1485 * ftrace_location - return true if the ip given is a traced location
1486 * @ip: the instruction pointer to check
1487 *
1488 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1489 * That is, the instruction that is either a NOP or call to
1490 * the function tracer. It checks the ftrace internal tables to
1491 * determine if the address belongs or not.
1492 */
1493 unsigned long ftrace_location(unsigned long ip)
1494 {
1495 return ftrace_location_range(ip, ip);
1496 }
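/*
 * A minimal sketch of how a caller (for instance a probing subsystem)
 * might use ftrace_location(); 'addr' is a hypothetical symbol address:
 *
 *	unsigned long ip = ftrace_location(addr);
 *
 *	if (ip)
 *		hook_via_ftrace(ip);	hypothetical: ip is the call site
 *	else
 *		return -EINVAL;		addr is not a traced location
 */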
1497
1498 /**
1499 * ftrace_text_reserved - return true if range contains an ftrace location
1500 * @start: start of range to search
1501 * @end: end of range to search (inclusive). @end points to the last byte to check.
1502 *
1503 * Returns 1 if @start and @end contains a ftrace location.
1504 * That is, the instruction that is either a NOP or call to
1505 * the function tracer. It checks the ftrace internal tables to
1506 * determine if the address belongs or not.
1507 */
1508 int ftrace_text_reserved(const void *start, const void *end)
1509 {
1510 unsigned long ret;
1511
1512 ret = ftrace_location_range((unsigned long)start,
1513 (unsigned long)end);
1514
1515 return (int)!!ret;
1516 }
1517
1518 /* Test if ops registered to this rec needs regs */
1519 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1520 {
1521 struct ftrace_ops *ops;
1522 bool keep_regs = false;
1523
1524 for (ops = ftrace_ops_list;
1525 ops != &ftrace_list_end; ops = ops->next) {
1526 /* pass rec in as regs to have non-NULL val */
1527 if (ftrace_ops_test(ops, rec->ip, rec)) {
1528 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1529 keep_regs = true;
1530 break;
1531 }
1532 }
1533 }
1534
1535 return keep_regs;
1536 }
1537
1538 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1539 int filter_hash,
1540 bool inc)
1541 {
1542 struct ftrace_hash *hash;
1543 struct ftrace_hash *other_hash;
1544 struct ftrace_page *pg;
1545 struct dyn_ftrace *rec;
1546 int count = 0;
1547 int all = 0;
1548
1549 /* Only update if the ops has been registered */
1550 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1551 return;
1552
1553 /*
1554 * In the filter_hash case:
1555 * If the count is zero, we update all records.
1556 * Otherwise we just update the items in the hash.
1557 *
1558 * In the notrace_hash case:
1559 * We enable the update in the hash.
1560 * As disabling notrace means enabling the tracing,
1561 * and enabling notrace means disabling, the inc variable
1562 * gets inverted.
1563 */
1564 if (filter_hash) {
1565 hash = ops->func_hash->filter_hash;
1566 other_hash = ops->func_hash->notrace_hash;
1567 if (ftrace_hash_empty(hash))
1568 all = 1;
1569 } else {
1570 inc = !inc;
1571 hash = ops->func_hash->notrace_hash;
1572 other_hash = ops->func_hash->filter_hash;
1573 /*
1574 * If the notrace hash has no items,
1575 * then there's nothing to do.
1576 */
1577 if (ftrace_hash_empty(hash))
1578 return;
1579 }
1580
1581 do_for_each_ftrace_rec(pg, rec) {
1582 int in_other_hash = 0;
1583 int in_hash = 0;
1584 int match = 0;
1585
1586 if (all) {
1587 /*
1588 * Only the filter_hash affects all records.
1589 * Update if the record is not in the notrace hash.
1590 */
1591 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1592 match = 1;
1593 } else {
1594 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1595 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1596
1597 /*
1598 * If filter_hash is set, we want to match all functions
1599 * that are in the hash but not in the other hash.
1600 *
1601 * If filter_hash is not set, then we are decrementing.
1602 * That means we match anything that is in the hash
1603 * and also in the other_hash. That is, we need to turn
1604 * off functions in the other hash because they are disabled
1605 * by this hash.
1606 */
1607 if (filter_hash && in_hash && !in_other_hash)
1608 match = 1;
1609 else if (!filter_hash && in_hash &&
1610 (in_other_hash || ftrace_hash_empty(other_hash)))
1611 match = 1;
1612 }
1613 if (!match)
1614 continue;
1615
1616 if (inc) {
1617 rec->flags++;
1618 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1619 return;
1620
1621 /*
1622 * If there's only a single callback registered to a
1623 * function, and the ops has a trampoline registered
1624 * for it, then we can call it directly.
1625 */
1626 if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1627 rec->flags |= FTRACE_FL_TRAMP;
1628 else
1629 /*
1630 * If we are adding another function callback
1631 * to this function, and the previous had a
1632 * custom trampoline in use, then we need to go
1633 * back to the default trampoline.
1634 */
1635 rec->flags &= ~FTRACE_FL_TRAMP;
1636
1637 /*
1638 * If any ops wants regs saved for this function
1639 * then all ops will get saved regs.
1640 */
1641 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1642 rec->flags |= FTRACE_FL_REGS;
1643 } else {
1644 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1645 return;
1646 rec->flags--;
1647
1648 /*
1649 * If the rec had REGS enabled and the ops that is
1650 * being removed had REGS set, then see if there is
1651 * still any ops for this record that wants regs.
1652 * If not, we can stop recording them.
1653 */
1654 if (ftrace_rec_count(rec) > 0 &&
1655 rec->flags & FTRACE_FL_REGS &&
1656 ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1657 if (!test_rec_ops_needs_regs(rec))
1658 rec->flags &= ~FTRACE_FL_REGS;
1659 }
1660
1661 /*
1662 * If the rec had TRAMP enabled, then it needs to
1663 * be cleared. As TRAMP can only be enabled if
1664 * there is only a single ops attached to it.
1665 * In other words, always disable it on decrementing.
1666 * In the future, we may set it if rec count is
1667 * decremented to one, and the ops that is left
1668 * has a trampoline.
1669 */
1670 rec->flags &= ~FTRACE_FL_TRAMP;
1671
1672 /*
1673 * flags will be cleared in ftrace_check_record()
1674 * if rec count is zero.
1675 */
1676 }
1677 count++;
1678 /* Shortcut, if we handled all records, we are done. */
1679 if (!all && count == hash->count)
1680 return;
1681 } while_for_each_ftrace_rec();
1682 }
1683
1684 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1685 int filter_hash)
1686 {
1687 __ftrace_hash_rec_update(ops, filter_hash, 0);
1688 }
1689
1690 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1691 int filter_hash)
1692 {
1693 __ftrace_hash_rec_update(ops, filter_hash, 1);
1694 }
1695
1696 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1697 int filter_hash, int inc)
1698 {
1699 struct ftrace_ops *op;
1700
1701 __ftrace_hash_rec_update(ops, filter_hash, inc);
1702
1703 if (ops->func_hash != &global_ops.local_hash)
1704 return;
1705
1706 /*
1707 * If the ops shares the global_ops hash, then we need to update
1708 * all ops that are enabled and use this hash.
1709 */
1710 do_for_each_ftrace_op(op, ftrace_ops_list) {
1711 /* Already done */
1712 if (op == ops)
1713 continue;
1714 if (op->func_hash == &global_ops.local_hash)
1715 __ftrace_hash_rec_update(op, filter_hash, inc);
1716 } while_for_each_ftrace_op(op);
1717 }
1718
1719 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1720 int filter_hash)
1721 {
1722 ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1723 }
1724
1725 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1726 int filter_hash)
1727 {
1728 ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1729 }
1730
1731 static void print_ip_ins(const char *fmt, unsigned char *p)
1732 {
1733 int i;
1734
1735 printk(KERN_CONT "%s", fmt);
1736
1737 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1738 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1739 }
1740
1741 /**
1742 * ftrace_bug - report and shutdown function tracer
1743 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1744 * @ip: The address that failed
1745 *
1746 * The arch code that enables or disables the function tracing
1747 * can call ftrace_bug() when it has detected a problem in
1748 * modifying the code. @failed should be one of either:
1749 * EFAULT - if the problem happens on reading the @ip address
1750 * EINVAL - if what is read at @ip is not what was expected
1751 * EPERM - if the problem happens on writing to the @ip address
1752 */
1753 void ftrace_bug(int failed, unsigned long ip)
1754 {
1755 switch (failed) {
1756 case -EFAULT:
1757 FTRACE_WARN_ON_ONCE(1);
1758 pr_info("ftrace faulted on modifying ");
1759 print_ip_sym(ip);
1760 break;
1761 case -EINVAL:
1762 FTRACE_WARN_ON_ONCE(1);
1763 pr_info("ftrace failed to modify ");
1764 print_ip_sym(ip);
1765 print_ip_ins(" actual: ", (unsigned char *)ip);
1766 printk(KERN_CONT "\n");
1767 break;
1768 case -EPERM:
1769 FTRACE_WARN_ON_ONCE(1);
1770 pr_info("ftrace faulted on writing ");
1771 print_ip_sym(ip);
1772 break;
1773 default:
1774 FTRACE_WARN_ON_ONCE(1);
1775 pr_info("ftrace faulted on unknown error ");
1776 print_ip_sym(ip);
1777 }
1778 }
1779
1780 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1781 {
1782 unsigned long flag = 0UL;
1783
1784 /*
1785 * If we are updating calls:
1786 *
1787 * If the record has a ref count, then we need to enable it
1788 * because someone is using it.
1789 *
1790 * Otherwise we make sure its disabled.
1791 *
1792 * If we are disabling calls, then disable all records that
1793 * are enabled.
1794 */
1795 if (enable && ftrace_rec_count(rec))
1796 flag = FTRACE_FL_ENABLED;
1797
1798 /*
1799 * If enabling and the REGS flag does not match the REGS_EN, or
1800 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
1801 * this record. Set flags to fail the compare against ENABLED.
1802 */
1803 if (flag) {
1804 if (!(rec->flags & FTRACE_FL_REGS) !=
1805 !(rec->flags & FTRACE_FL_REGS_EN))
1806 flag |= FTRACE_FL_REGS;
1807
1808 if (!(rec->flags & FTRACE_FL_TRAMP) !=
1809 !(rec->flags & FTRACE_FL_TRAMP_EN))
1810 flag |= FTRACE_FL_TRAMP;
1811 }
1812
1813 /* If the state of this record hasn't changed, then do nothing */
1814 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1815 return FTRACE_UPDATE_IGNORE;
1816
1817 if (flag) {
1818 /* Save off if rec is being enabled (for return value) */
1819 flag ^= rec->flags & FTRACE_FL_ENABLED;
1820
1821 if (update) {
1822 rec->flags |= FTRACE_FL_ENABLED;
1823 if (flag & FTRACE_FL_REGS) {
1824 if (rec->flags & FTRACE_FL_REGS)
1825 rec->flags |= FTRACE_FL_REGS_EN;
1826 else
1827 rec->flags &= ~FTRACE_FL_REGS_EN;
1828 }
1829 if (flag & FTRACE_FL_TRAMP) {
1830 if (rec->flags & FTRACE_FL_TRAMP)
1831 rec->flags |= FTRACE_FL_TRAMP_EN;
1832 else
1833 rec->flags &= ~FTRACE_FL_TRAMP_EN;
1834 }
1835 }
1836
1837 /*
1838 * If this record is being updated from a nop, then
1839 * return UPDATE_MAKE_CALL.
1840 * Otherwise,
1841 * return UPDATE_MODIFY_CALL to tell the caller to convert
1842 * from the save regs, to a non-save regs function or
1843 * vice versa, or from a trampoline call.
1844 */
1845 if (flag & FTRACE_FL_ENABLED)
1846 return FTRACE_UPDATE_MAKE_CALL;
1847
1848 return FTRACE_UPDATE_MODIFY_CALL;
1849 }
1850
1851 if (update) {
1852 /* If there's no more users, clear all flags */
1853 if (!ftrace_rec_count(rec))
1854 rec->flags = 0;
1855 else
1856 /* Just disable the record (keep REGS state) */
1857 rec->flags &= ~FTRACE_FL_ENABLED;
1858 }
1859
1860 return FTRACE_UPDATE_MAKE_NOP;
1861 }
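/*
 * Some concrete outcomes of ftrace_check_record() above, with
 * update == 1:
 *
 *   - rec is a nop and one ops now references it:
 *	flag ends up ENABLED, rec was not	-> FTRACE_UPDATE_MAKE_CALL
 *   - rec is enabled without regs and a SAVE_REGS ops is added:
 *	REGS != REGS_EN				-> FTRACE_UPDATE_MODIFY_CALL
 *   - the last ops referencing rec goes away:
 *	flag == 0, rec was ENABLED		-> FTRACE_UPDATE_MAKE_NOP
 *   - nothing changed				-> FTRACE_UPDATE_IGNORE
 */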
1862
1863 /**
1864 * ftrace_update_record, set a record that now is tracing or not
1865 * @rec: the record to update
1866 * @enable: set to 1 if the record is tracing, zero to force disable
1867 *
1868 * The records that represent all functions that can be traced need
1869 * to be updated when tracing has been enabled.
1870 */
1871 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1872 {
1873 return ftrace_check_record(rec, enable, 1);
1874 }
1875
1876 /**
1877 * ftrace_test_record, check if the record has been enabled or not
1878 * @rec: the record to test
1879 * @enable: set to 1 to check if enabled, 0 if it is disabled
1880 *
1881 * The arch code may need to test if a record is already set to
1882 * tracing to determine how to modify the function code that it
1883 * represents.
1884 */
1885 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1886 {
1887 return ftrace_check_record(rec, enable, 0);
1888 }
1889
1890 static struct ftrace_ops *
1891 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
1892 {
1893 struct ftrace_ops *op;
1894 unsigned long ip = rec->ip;
1895
1896 do_for_each_ftrace_op(op, ftrace_ops_list) {
1897
1898 if (!op->trampoline)
1899 continue;
1900
1901 if (hash_contains_ip(ip, op->func_hash))
1902 return op;
1903 } while_for_each_ftrace_op(op);
1904
1905 return NULL;
1906 }
1907
1908 static struct ftrace_ops *
1909 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
1910 {
1911 struct ftrace_ops *op;
1912 unsigned long ip = rec->ip;
1913
1914 /*
1915 * Need to check removed ops first.
1916 * If they are being removed, and this rec has a tramp,
1917 * and this rec is in the ops list, then it would be the
1918 * one with the tramp.
1919 */
1920 if (removed_ops) {
1921 if (hash_contains_ip(ip, &removed_ops->old_hash))
1922 return removed_ops;
1923 }
1924
1925 /*
1926 * Need to find the current trampoline for a rec.
1927 * Now, a trampoline is only attached to a rec if there
1928 * was a single 'ops' attached to it. But this can be called
1929 * when we are adding another op to the rec or removing the
1930 * current one. Thus, if the op is being added, we can
1931 * ignore it because it hasn't attached itself to the rec
1932 * yet.
1933 *
1934 * If an ops is being modified (hooking to different functions)
1935 * then we don't care about the new functions that are being
1936 * added, just the old ones (that are probably being removed).
1937 *
1938 * If we are adding an ops to a function that already is using
1939 * a trampoline, it needs to be removed (trampolines are only
1940 * for single ops connected), then an ops that is not being
1941 * modified also needs to be checked.
1942 */
1943 do_for_each_ftrace_op(op, ftrace_ops_list) {
1944
1945 if (!op->trampoline)
1946 continue;
1947
1948 /*
1949 * If the ops is being added, it hasn't gotten to
1950 * the point to be removed from this tree yet.
1951 */
1952 if (op->flags & FTRACE_OPS_FL_ADDING)
1953 continue;
1954
1955
1956 /*
1957 * If the ops is being modified and is in the old
1958 * hash, then it is probably being removed from this
1959 * function.
1960 */
1961 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
1962 hash_contains_ip(ip, &op->old_hash))
1963 return op;
1964 /*
1965 * If the ops is not being added or modified, and it's
1966 * in its normal filter hash, then this must be the one
1967 * we want!
1968 */
1969 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
1970 hash_contains_ip(ip, op->func_hash))
1971 return op;
1972
1973 } while_for_each_ftrace_op(op);
1974
1975 return NULL;
1976 }
1977
1978 static struct ftrace_ops *
1979 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
1980 {
1981 struct ftrace_ops *op;
1982 unsigned long ip = rec->ip;
1983
1984 do_for_each_ftrace_op(op, ftrace_ops_list) {
1985 /* pass rec in as regs to have non-NULL val */
1986 if (hash_contains_ip(ip, op->func_hash))
1987 return op;
1988 } while_for_each_ftrace_op(op);
1989
1990 return NULL;
1991 }
1992
1993 /**
1994 * ftrace_get_addr_new - Get the call address to set to
1995 * @rec: The ftrace record descriptor
1996 *
1997 * If the record has the FTRACE_FL_REGS set, that means that it
1998 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
1999 * is not set, then it wants to convert to the normal callback.
2000 *
2001 * Returns the address of the trampoline to set to
2002 */
2003 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2004 {
2005 struct ftrace_ops *ops;
2006
2007 /* Trampolines take precedence over regs */
2008 if (rec->flags & FTRACE_FL_TRAMP) {
2009 ops = ftrace_find_tramp_ops_new(rec);
2010 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2011 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2012 (void *)rec->ip, (void *)rec->ip, rec->flags);
2013 /* Ftrace is shutting down, return anything */
2014 return (unsigned long)FTRACE_ADDR;
2015 }
2016 return ops->trampoline;
2017 }
2018
2019 if (rec->flags & FTRACE_FL_REGS)
2020 return (unsigned long)FTRACE_REGS_ADDR;
2021 else
2022 return (unsigned long)FTRACE_ADDR;
2023 }
2024
2025 /**
2026 * ftrace_get_addr_curr - Get the call address that is already there
2027 * @rec: The ftrace record descriptor
2028 *
2029 * The FTRACE_FL_REGS_EN is set when the record already points to
2030 * a function that saves all the regs. Basically the '_EN' version
2031 * represents the current state of the function.
2032 *
2033 * Returns the address of the trampoline that is currently being called
2034 */
2035 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2036 {
2037 struct ftrace_ops *ops;
2038
2039 /* Trampolines take precedence over regs */
2040 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2041 ops = ftrace_find_tramp_ops_curr(rec);
2042 if (FTRACE_WARN_ON(!ops)) {
2043 pr_warning("Bad trampoline accounting at: %p (%pS)\n",
2044 (void *)rec->ip, (void *)rec->ip);
2045 /* Ftrace is shutting down, return anything */
2046 return (unsigned long)FTRACE_ADDR;
2047 }
2048 return ops->trampoline;
2049 }
2050
2051 if (rec->flags & FTRACE_FL_REGS_EN)
2052 return (unsigned long)FTRACE_REGS_ADDR;
2053 else
2054 return (unsigned long)FTRACE_ADDR;
2055 }
2056
2057 static int
2058 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2059 {
2060 unsigned long ftrace_old_addr;
2061 unsigned long ftrace_addr;
2062 int ret;
2063
2064 ftrace_addr = ftrace_get_addr_new(rec);
2065
2066 /* This needs to be done before we call ftrace_update_record */
2067 ftrace_old_addr = ftrace_get_addr_curr(rec);
2068
2069 ret = ftrace_update_record(rec, enable);
2070
2071 switch (ret) {
2072 case FTRACE_UPDATE_IGNORE:
2073 return 0;
2074
2075 case FTRACE_UPDATE_MAKE_CALL:
2076 return ftrace_make_call(rec, ftrace_addr);
2077
2078 case FTRACE_UPDATE_MAKE_NOP:
2079 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2080
2081 case FTRACE_UPDATE_MODIFY_CALL:
2082 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2083 }
2084
2085 return -1; /* unknown ftrace bug */
2086 }
2087
2088 void __weak ftrace_replace_code(int enable)
2089 {
2090 struct dyn_ftrace *rec;
2091 struct ftrace_page *pg;
2092 int failed;
2093
2094 if (unlikely(ftrace_disabled))
2095 return;
2096
2097 do_for_each_ftrace_rec(pg, rec) {
2098 failed = __ftrace_replace_code(rec, enable);
2099 if (failed) {
2100 ftrace_bug(failed, rec->ip);
2101 /* Stop processing */
2102 return;
2103 }
2104 } while_for_each_ftrace_rec();
2105 }
2106
2107 struct ftrace_rec_iter {
2108 struct ftrace_page *pg;
2109 int index;
2110 };
2111
2112 /**
2113 * ftrace_rec_iter_start - start up iterating over traced functions
2114 *
2115 * Returns an iterator handle that is used to iterate over all
2116 * the records that represent address locations where functions
2117 * are traced.
2118 *
2119 * May return NULL if no records are available.
2120 */
2121 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2122 {
2123 /*
2124 * We only use a single iterator.
2125 * Protected by the ftrace_lock mutex.
2126 */
2127 static struct ftrace_rec_iter ftrace_rec_iter;
2128 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2129
2130 iter->pg = ftrace_pages_start;
2131 iter->index = 0;
2132
2133 /* Could have empty pages */
2134 while (iter->pg && !iter->pg->index)
2135 iter->pg = iter->pg->next;
2136
2137 if (!iter->pg)
2138 return NULL;
2139
2140 return iter;
2141 }
2142
2143 /**
2144 * ftrace_rec_iter_next - get the next record to process.
2145 * @iter: The handle to the iterator.
2146 *
2147 * Returns the next iterator after the given iterator @iter.
2148 */
2149 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2150 {
2151 iter->index++;
2152
2153 if (iter->index >= iter->pg->index) {
2154 iter->pg = iter->pg->next;
2155 iter->index = 0;
2156
2157 /* Could have empty pages */
2158 while (iter->pg && !iter->pg->index)
2159 iter->pg = iter->pg->next;
2160 }
2161
2162 if (!iter->pg)
2163 return NULL;
2164
2165 return iter;
2166 }
2167
2168 /**
2169 * ftrace_rec_iter_record - get the record at the iterator location
2170 * @iter: The current iterator location
2171 *
2172 * Returns the record that the current @iter is at.
2173 */
2174 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2175 {
2176 return &iter->pg->records[iter->index];
2177 }
2178
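/*
 * Convert the mcount call site of @rec into a NOP.
 * Returns 1 on success, 0 if the conversion failed (ftrace_bug() is
 * reported) or if ftrace is disabled.
 */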
2179 static int
2180 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2181 {
2182 unsigned long ip;
2183 int ret;
2184
2185 ip = rec->ip;
2186
2187 if (unlikely(ftrace_disabled))
2188 return 0;
2189
2190 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2191 if (ret) {
2192 ftrace_bug(ret, ip);
2193 return 0;
2194 }
2195 return 1;
2196 }
2197
2198 /*
2199 * archs can override this function if they must do something
2200 * before the modifying code is performed.
2201 */
2202 int __weak ftrace_arch_code_modify_prepare(void)
2203 {
2204 return 0;
2205 }
2206
2207 /*
2208 * archs can override this function if they must do something
2209 * after the modifying code is performed.
2210 */
2211 int __weak ftrace_arch_code_modify_post_process(void)
2212 {
2213 return 0;
2214 }
2215
2216 void ftrace_modify_all_code(int command)
2217 {
2218 int update = command & FTRACE_UPDATE_TRACE_FUNC;
2219 int err = 0;
2220
2221 /*
2222 * If the ftrace_caller calls an ftrace_ops func directly,
2223 * we need to make sure that it only traces functions it
2224 * expects to trace. When doing the switch of functions,
2225 * we need to update to the ftrace_ops_list_func first
2226 * before the transition between old and new calls is set,
2227 * as the ftrace_ops_list_func will check the ops hashes
2228 * to make sure the ops only trace the functions they
2229 * expect to trace.
2230 */
2231 if (update) {
2232 err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2233 if (FTRACE_WARN_ON(err))
2234 return;
2235 }
2236
2237 if (command & FTRACE_UPDATE_CALLS)
2238 ftrace_replace_code(1);
2239 else if (command & FTRACE_DISABLE_CALLS)
2240 ftrace_replace_code(0);
2241
2242 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2243 function_trace_op = set_function_trace_op;
2244 smp_wmb();
2245 /* If irqs are disabled, we are in stop machine */
2246 if (!irqs_disabled())
2247 smp_call_function(ftrace_sync_ipi, NULL, 1);
2248 err = ftrace_update_ftrace_func(ftrace_trace_function);
2249 if (FTRACE_WARN_ON(err))
2250 return;
2251 }
2252
2253 if (command & FTRACE_START_FUNC_RET)
2254 err = ftrace_enable_ftrace_graph_caller();
2255 else if (command & FTRACE_STOP_FUNC_RET)
2256 err = ftrace_disable_ftrace_graph_caller();
2257 FTRACE_WARN_ON(err);
2258 }
2259
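/* stop_machine() callback: apply the requested ftrace command. */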
2260 static int __ftrace_modify_code(void *data)
2261 {
2262 int *command = data;
2263
2264 ftrace_modify_all_code(*command);
2265
2266 return 0;
2267 }
2268
2269 /**
2270 * ftrace_run_stop_machine - go back to the stop machine method
2271 * @command: The command to tell ftrace what to do
2272 *
2273 * If an arch needs to fall back to the stop machine method, then
2274 * it can call this function.
2275 */
2276 void ftrace_run_stop_machine(int command)
2277 {
2278 stop_machine(__ftrace_modify_code, &command, NULL);
2279 }
2280
2281 /**
2282 * arch_ftrace_update_code - modify the code to trace or not trace
2283 * @command: The command that needs to be done
2284 *
2285 * Archs can override this function if they do not need to
2286 * run stop_machine() to modify code.
2287 */
2288 void __weak arch_ftrace_update_code(int command)
2289 {
2290 ftrace_run_stop_machine(command);
2291 }
2292
2293 static void ftrace_run_update_code(int command)
2294 {
2295 int ret;
2296
2297 ret = ftrace_arch_code_modify_prepare();
2298 FTRACE_WARN_ON(ret);
2299 if (ret)
2300 return;
2301
2302 /*
2303 * By default we use stop_machine() to modify the code.
2304 * But archs can do whatever they want as long as it
2305 * is safe. stop_machine() is the safest, but also
2306 * produces the most overhead.
2307 */
2308 arch_ftrace_update_code(command);
2309
2310 ret = ftrace_arch_code_modify_post_process();
2311 FTRACE_WARN_ON(ret);
2312 }
2313
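/*
 * Run a code update for @ops with @command while marking the ops as
 * MODIFYING and exposing its previous filter hash in ops->old_hash, so
 * call sites still using the old hash can be matched during the update.
 */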
2314 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2315 struct ftrace_hash *old_hash)
2316 {
2317 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2318 ops->old_hash.filter_hash = old_hash;
2319 ftrace_run_update_code(command);
2320 ops->old_hash.filter_hash = NULL;
2321 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2322 }
2323
2324 static ftrace_func_t saved_ftrace_func;
2325 static int ftrace_start_up;
2326
2327 static void control_ops_free(struct ftrace_ops *ops)
2328 {
2329 free_percpu(ops->disabled);
2330 }
2331
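/*
 * Apply @command to the traced call sites, also switching the top-level
 * trace function if it has changed. Does nothing if there is no work to
 * do or if ftrace is not enabled.
 */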
2332 static void ftrace_startup_enable(int command)
2333 {
2334 if (saved_ftrace_func != ftrace_trace_function) {
2335 saved_ftrace_func = ftrace_trace_function;
2336 command |= FTRACE_UPDATE_TRACE_FUNC;
2337 }
2338
2339 if (!command || !ftrace_enabled)
2340 return;
2341
2342 ftrace_run_update_code(command);
2343 }
2344
2345 static void ftrace_startup_all(int command)
2346 {
2347 update_all_ops = true;
2348 ftrace_startup_enable(command);
2349 update_all_ops = false;
2350 }
2351
2352 static int ftrace_startup(struct ftrace_ops *ops, int command)
2353 {
2354 int ret;
2355
2356 if (unlikely(ftrace_disabled))
2357 return -ENODEV;
2358
2359 ret = __register_ftrace_function(ops);
2360 if (ret)
2361 return ret;
2362
2363 ftrace_start_up++;
2364 command |= FTRACE_UPDATE_CALLS;
2365
2366 /*
2367 * Note that ftrace probes use this to start up
2368 * and modify functions they will probe. But we still
2369 * set the ADDING flag for modification, as probes
2370 * do not have trampolines. If they add them in the
2371 * future, then the probes will need to distinguish
2372 * between adding and updating probes.
2373 */
2374 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2375
2376 ftrace_hash_rec_enable(ops, 1);
2377
2378 ftrace_startup_enable(command);
2379
2380 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2381
2382 return 0;
2383 }
2384
2385 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2386 {
2387 int ret;
2388
2389 if (unlikely(ftrace_disabled))
2390 return -ENODEV;
2391
2392 ret = __unregister_ftrace_function(ops);
2393 if (ret)
2394 return ret;
2395
2396 ftrace_start_up--;
2397 /*
2398 * Just warn in case of an imbalance; no need to kill ftrace, it's not
2399 * critical, but the ftrace_call callers may never be nopped again after
2400 * further ftrace uses.
2401 */
2402 WARN_ON_ONCE(ftrace_start_up < 0);
2403
2404 ftrace_hash_rec_disable(ops, 1);
2405
2406 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2407
2408 command |= FTRACE_UPDATE_CALLS;
2409
2410 if (saved_ftrace_func != ftrace_trace_function) {
2411 saved_ftrace_func = ftrace_trace_function;
2412 command |= FTRACE_UPDATE_TRACE_FUNC;
2413 }
2414
2415 if (!command || !ftrace_enabled) {
2416 /*
2417 * If these are control ops, they still need their
2418 * per_cpu field freed. Since function tracing is
2419 * not currently active, we can just free them
2420 * without synchronizing all CPUs.
2421 */
2422 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2423 control_ops_free(ops);
2424 return 0;
2425 }
2426
2427 /*
2428 * If the ops uses a trampoline, then it needs to be
2429 * tested first on update.
2430 */
2431 ops->flags |= FTRACE_OPS_FL_REMOVING;
2432 removed_ops = ops;
2433
2434 /* The trampoline logic checks the old hashes */
2435 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2436 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2437
2438 ftrace_run_update_code(command);
2439
2440 /*
2441 * If there's no more ops registered with ftrace, run a
2442 * sanity check to make sure all rec flags are cleared.
2443 */
2444 if (ftrace_ops_list == &ftrace_list_end) {
2445 struct ftrace_page *pg;
2446 struct dyn_ftrace *rec;
2447
2448 do_for_each_ftrace_rec(pg, rec) {
2449 if (FTRACE_WARN_ON_ONCE(rec->flags))
2450 pr_warn(" %pS flags:%lx\n",
2451 (void *)rec->ip, rec->flags);
2452 } while_for_each_ftrace_rec();
2453 }
2454
2455 ops->old_hash.filter_hash = NULL;
2456 ops->old_hash.notrace_hash = NULL;
2457
2458 removed_ops = NULL;
2459 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2460
2461 /*
2462 * Dynamic ops may be freed; we must make sure that all
2463 * callers are done before leaving this function.
2464 * The same goes for freeing the per_cpu data of the control
2465 * ops.
2466 *
2467 * Again, normal synchronize_sched() is not good enough.
2468 * We need to do a hard force of sched synchronization.
2469 * This is because we use preempt_disable() to do RCU, but
2470 * the function tracers can be called where RCU is not watching
2471 * (like before user_exit()). We cannot rely on the RCU
2472 * infrastructure to do the synchronization, thus we must do it
2473 * ourselves.
2474 */
2475 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2476 schedule_on_each_cpu(ftrace_sync);
2477
2478 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2479 control_ops_free(ops);
2480 }
2481
2482 return 0;
2483 }
2484
2485 static void ftrace_startup_sysctl(void)
2486 {
2487 if (unlikely(ftrace_disabled))
2488 return;
2489
2490 /* Force update next time */
2491 saved_ftrace_func = NULL;
2492 /* ftrace_start_up is true if we want ftrace running */
2493 if (ftrace_start_up)
2494 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2495 }
2496
2497 static void ftrace_shutdown_sysctl(void)
2498 {
2499 if (unlikely(ftrace_disabled))
2500 return;
2501
2502 /* ftrace_start_up is true if ftrace is running */
2503 if (ftrace_start_up)
2504 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2505 }
2506
2507 static cycle_t ftrace_update_time;
2508 unsigned long ftrace_update_tot_cnt;
2509
2510 static inline int ops_traces_mod(struct ftrace_ops *ops)
2511 {
2512 /*
2513 * An empty filter_hash defaults to tracing all (module) functions.
2514 * But a non-empty notrace hash requires testing individual module functions.
2515 */
2516 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2517 ftrace_hash_empty(ops->func_hash->notrace_hash);
2518 }
2519
2520 /*
2521 * Check if the current ops references the record.
2522 *
2523 * If the ops traces all functions, then it was already accounted for.
2524 * If the ops does not trace the current record function, skip it.
2525 * If the ops ignores the function via notrace filter, skip it.
2526 */
2527 static inline bool
2528 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2529 {
2530 /* If ops isn't enabled, ignore it */
2531 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2532 return 0;
2533
2534 /* If ops traces all mods, we already accounted for it */
2535 if (ops_traces_mod(ops))
2536 return 0;
2537
2538 /* The function must be in the filter */
2539 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2540 !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2541 return 0;
2542
2543 /* If in notrace hash, we ignore it too */
2544 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2545 return 0;
2546
2547 return 1;
2548 }
2549
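/* Count how many enabled ftrace_ops reference @rec through their filters. */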
2550 static int referenced_filters(struct dyn_ftrace *rec)
2551 {
2552 struct ftrace_ops *ops;
2553 int cnt = 0;
2554
2555 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2556 if (ops_references_rec(ops, rec))
2557 cnt++;
2558 }
2559
2560 return cnt;
2561 }
2562
2563 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2564 {
2565 struct ftrace_page *pg;
2566 struct dyn_ftrace *p;
2567 cycle_t start, stop;
2568 unsigned long update_cnt = 0;
2569 unsigned long ref = 0;
2570 bool test = false;
2571 int i;
2572
2573 /*
2574 * When adding a module, we need to check if tracers are
2575 * currently enabled and if they are set to trace all functions.
2576 * If they are, we need to enable the module functions as well
2577 * as update the reference counts for those function records.
2578 */
2579 if (mod) {
2580 struct ftrace_ops *ops;
2581
2582 for (ops = ftrace_ops_list;
2583 ops != &ftrace_list_end; ops = ops->next) {
2584 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2585 if (ops_traces_mod(ops))
2586 ref++;
2587 else
2588 test = true;
2589 }
2590 }
2591 }
2592
2593 start = ftrace_now(raw_smp_processor_id());
2594
2595 for (pg = new_pgs; pg; pg = pg->next) {
2596
2597 for (i = 0; i < pg->index; i++) {
2598 int cnt = ref;
2599
2600 /* If something went wrong, bail without enabling anything */
2601 if (unlikely(ftrace_disabled))
2602 return -1;
2603
2604 p = &pg->records[i];
2605 if (test)
2606 cnt += referenced_filters(p);
2607 p->flags = cnt;
2608
2609 /*
2610 * Do the initial record conversion from mcount jump
2611 * to the NOP instructions.
2612 */
2613 if (!ftrace_code_disable(mod, p))
2614 break;
2615
2616 update_cnt++;
2617
2618 /*
2619 * If the tracing is enabled, go ahead and enable the record.
2620 *
2621 * The reason not to enable the record immediately is the
2622 * inherent check of ftrace_make_nop/ftrace_make_call for
2623 * correct previous instructions. Doing the NOP conversion
2624 * first puts the module into the correct state, thus
2625 * passing the ftrace_make_call check.
2626 */
2627 if (ftrace_start_up && cnt) {
2628 int failed = __ftrace_replace_code(p, 1);
2629 if (failed)
2630 ftrace_bug(failed, p->ip);
2631 }
2632 }
2633 }
2634
2635 stop = ftrace_now(raw_smp_processor_id());
2636 ftrace_update_time = stop - start;
2637 ftrace_update_tot_cnt += update_cnt;
2638
2639 return 0;
2640 }
2641
2642 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2643 {
2644 int order;
2645 int cnt;
2646
2647 if (WARN_ON(!count))
2648 return -EINVAL;
2649
2650 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2651
2652 /*
2653 * We want to fill as much as possible. No more than a page
2654 * may be empty.
2655 */
2656 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2657 order--;
2658
2659 again:
2660 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2661
2662 if (!pg->records) {
2663 /* if we can't allocate this size, try something smaller */
2664 if (!order)
2665 return -ENOMEM;
2666 order >>= 1;
2667 goto again;
2668 }
2669
2670 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2671 pg->size = cnt;
2672
2673 if (cnt > count)
2674 cnt = count;
2675
2676 return cnt;
2677 }
2678
2679 static struct ftrace_page *
2680 ftrace_allocate_pages(unsigned long num_to_init)
2681 {
2682 struct ftrace_page *start_pg;
2683 struct ftrace_page *pg;
2684 int order;
2685 int cnt;
2686
2687 if (!num_to_init)
2688 return 0;
2689
2690 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2691 if (!pg)
2692 return NULL;
2693
2694 /*
2695 * Try to allocate as much as possible in one contiguous
2696 * location that fills in all of the space. We want to
2697 * waste as little space as possible.
2698 */
2699 for (;;) {
2700 cnt = ftrace_allocate_records(pg, num_to_init);
2701 if (cnt < 0)
2702 goto free_pages;
2703
2704 num_to_init -= cnt;
2705 if (!num_to_init)
2706 break;
2707
2708 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2709 if (!pg->next)
2710 goto free_pages;
2711
2712 pg = pg->next;
2713 }
2714
2715 return start_pg;
2716
2717 free_pages:
2718 pg = start_pg;
2719 while (pg) {
2720 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2721 free_pages((unsigned long)pg->records, order);
2722 start_pg = pg->next;
2723 kfree(pg);
2724 pg = start_pg;
2725 }
2726 pr_info("ftrace: FAILED to allocate memory for functions\n");
2727 return NULL;
2728 }
2729
2730 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2731
2732 struct ftrace_iterator {
2733 loff_t pos;
2734 loff_t func_pos;
2735 struct ftrace_page *pg;
2736 struct dyn_ftrace *func;
2737 struct ftrace_func_probe *probe;
2738 struct trace_parser parser;
2739 struct ftrace_hash *hash;
2740 struct ftrace_ops *ops;
2741 int hidx;
2742 int idx;
2743 unsigned flags;
2744 };
2745
2746 static void *
2747 t_hash_next(struct seq_file *m, loff_t *pos)
2748 {
2749 struct ftrace_iterator *iter = m->private;
2750 struct hlist_node *hnd = NULL;
2751 struct hlist_head *hhd;
2752
2753 (*pos)++;
2754 iter->pos = *pos;
2755
2756 if (iter->probe)
2757 hnd = &iter->probe->node;
2758 retry:
2759 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2760 return NULL;
2761
2762 hhd = &ftrace_func_hash[iter->hidx];
2763
2764 if (hlist_empty(hhd)) {
2765 iter->hidx++;
2766 hnd = NULL;
2767 goto retry;
2768 }
2769
2770 if (!hnd)
2771 hnd = hhd->first;
2772 else {
2773 hnd = hnd->next;
2774 if (!hnd) {
2775 iter->hidx++;
2776 goto retry;
2777 }
2778 }
2779
2780 if (WARN_ON_ONCE(!hnd))
2781 return NULL;
2782
2783 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2784
2785 return iter;
2786 }
2787
2788 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2789 {
2790 struct ftrace_iterator *iter = m->private;
2791 void *p = NULL;
2792 loff_t l;
2793
2794 if (!(iter->flags & FTRACE_ITER_DO_HASH))
2795 return NULL;
2796
2797 if (iter->func_pos > *pos)
2798 return NULL;
2799
2800 iter->hidx = 0;
2801 for (l = 0; l <= (*pos - iter->func_pos); ) {
2802 p = t_hash_next(m, &l);
2803 if (!p)
2804 break;
2805 }
2806 if (!p)
2807 return NULL;
2808
2809 /* Only set this if we have an item */
2810 iter->flags |= FTRACE_ITER_HASH;
2811
2812 return iter;
2813 }
2814
2815 static int
2816 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2817 {
2818 struct ftrace_func_probe *rec;
2819
2820 rec = iter->probe;
2821 if (WARN_ON_ONCE(!rec))
2822 return -EIO;
2823
2824 if (rec->ops->print)
2825 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2826
2827 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2828
2829 if (rec->data)
2830 seq_printf(m, ":%p", rec->data);
2831 seq_putc(m, '\n');
2832
2833 return 0;
2834 }
2835
2836 static void *
2837 t_next(struct seq_file *m, void *v, loff_t *pos)
2838 {
2839 struct ftrace_iterator *iter = m->private;
2840 struct ftrace_ops *ops = iter->ops;
2841 struct dyn_ftrace *rec = NULL;
2842
2843 if (unlikely(ftrace_disabled))
2844 return NULL;
2845
2846 if (iter->flags & FTRACE_ITER_HASH)
2847 return t_hash_next(m, pos);
2848
2849 (*pos)++;
2850 iter->pos = iter->func_pos = *pos;
2851
2852 if (iter->flags & FTRACE_ITER_PRINTALL)
2853 return t_hash_start(m, pos);
2854
2855 retry:
2856 if (iter->idx >= iter->pg->index) {
2857 if (iter->pg->next) {
2858 iter->pg = iter->pg->next;
2859 iter->idx = 0;
2860 goto retry;
2861 }
2862 } else {
2863 rec = &iter->pg->records[iter->idx++];
2864 if (((iter->flags & FTRACE_ITER_FILTER) &&
2865 !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
2866
2867 ((iter->flags & FTRACE_ITER_NOTRACE) &&
2868 !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
2869
2870 ((iter->flags & FTRACE_ITER_ENABLED) &&
2871 !(rec->flags & FTRACE_FL_ENABLED))) {
2872
2873 rec = NULL;
2874 goto retry;
2875 }
2876 }
2877
2878 if (!rec)
2879 return t_hash_start(m, pos);
2880
2881 iter->func = rec;
2882
2883 return iter;
2884 }
2885
2886 static void reset_iter_read(struct ftrace_iterator *iter)
2887 {
2888 iter->pos = 0;
2889 iter->func_pos = 0;
2890 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2891 }
2892
2893 static void *t_start(struct seq_file *m, loff_t *pos)
2894 {
2895 struct ftrace_iterator *iter = m->private;
2896 struct ftrace_ops *ops = iter->ops;
2897 void *p = NULL;
2898 loff_t l;
2899
2900 mutex_lock(&ftrace_lock);
2901
2902 if (unlikely(ftrace_disabled))
2903 return NULL;
2904
2905 /*
2906 * If an lseek was done, then reset and start from beginning.
2907 */
2908 if (*pos < iter->pos)
2909 reset_iter_read(iter);
2910
2911 /*
2912 * For set_ftrace_filter reading, if we have the filter
2913 * off, we can short cut and just print out that all
2914 * functions are enabled.
2915 */
2916 if ((iter->flags & FTRACE_ITER_FILTER &&
2917 ftrace_hash_empty(ops->func_hash->filter_hash)) ||
2918 (iter->flags & FTRACE_ITER_NOTRACE &&
2919 ftrace_hash_empty(ops->func_hash->notrace_hash))) {
2920 if (*pos > 0)
2921 return t_hash_start(m, pos);
2922 iter->flags |= FTRACE_ITER_PRINTALL;
2923 /* reset in case of seek/pread */
2924 iter->flags &= ~FTRACE_ITER_HASH;
2925 return iter;
2926 }
2927
2928 if (iter->flags & FTRACE_ITER_HASH)
2929 return t_hash_start(m, pos);
2930
2931 /*
2932 * Unfortunately, we need to restart at ftrace_pages_start
2933 * every time we let go of the ftrace_lock mutex. This is because
2934 * those pointers can change without the lock.
2935 */
2936 iter->pg = ftrace_pages_start;
2937 iter->idx = 0;
2938 for (l = 0; l <= *pos; ) {
2939 p = t_next(m, p, &l);
2940 if (!p)
2941 break;
2942 }
2943
2944 if (!p)
2945 return t_hash_start(m, pos);
2946
2947 return iter;
2948 }
2949
2950 static void t_stop(struct seq_file *m, void *p)
2951 {
2952 mutex_unlock(&ftrace_lock);
2953 }
2954
2955 static int t_show(struct seq_file *m, void *v)
2956 {
2957 struct ftrace_iterator *iter = m->private;
2958 struct dyn_ftrace *rec;
2959
2960 if (iter->flags & FTRACE_ITER_HASH)
2961 return t_hash_show(m, iter);
2962
2963 if (iter->flags & FTRACE_ITER_PRINTALL) {
2964 if (iter->flags & FTRACE_ITER_NOTRACE)
2965 seq_printf(m, "#### no functions disabled ####\n");
2966 else
2967 seq_printf(m, "#### all functions enabled ####\n");
2968 return 0;
2969 }
2970
2971 rec = iter->func;
2972
2973 if (!rec)
2974 return 0;
2975
2976 seq_printf(m, "%ps", (void *)rec->ip);
2977 if (iter->flags & FTRACE_ITER_ENABLED) {
2978 seq_printf(m, " (%ld)%s",
2979 ftrace_rec_count(rec),
2980 rec->flags & FTRACE_FL_REGS ? " R" : " ");
2981 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2982 struct ftrace_ops *ops;
2983
2984 ops = ftrace_find_tramp_ops_any(rec);
2985 if (ops)
2986 seq_printf(m, "\ttramp: %pS",
2987 (void *)ops->trampoline);
2988 else
2989 seq_printf(m, "\ttramp: ERROR!");
2990 }
2991 }
2992
2993 seq_printf(m, "\n");
2994
2995 return 0;
2996 }
2997
2998 static const struct seq_operations show_ftrace_seq_ops = {
2999 .start = t_start,
3000 .next = t_next,
3001 .stop = t_stop,
3002 .show = t_show,
3003 };
3004
3005 static int
3006 ftrace_avail_open(struct inode *inode, struct file *file)
3007 {
3008 struct ftrace_iterator *iter;
3009
3010 if (unlikely(ftrace_disabled))
3011 return -ENODEV;
3012
3013 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3014 if (iter) {
3015 iter->pg = ftrace_pages_start;
3016 iter->ops = &global_ops;
3017 }
3018
3019 return iter ? 0 : -ENOMEM;
3020 }
3021
3022 static int
3023 ftrace_enabled_open(struct inode *inode, struct file *file)
3024 {
3025 struct ftrace_iterator *iter;
3026
3027 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3028 if (iter) {
3029 iter->pg = ftrace_pages_start;
3030 iter->flags = FTRACE_ITER_ENABLED;
3031 iter->ops = &global_ops;
3032 }
3033
3034 return iter ? 0 : -ENOMEM;
3035 }
3036
3037 /**
3038 * ftrace_regex_open - initialize function tracer filter files
3039 * @ops: The ftrace_ops that hold the hash filters
3040 * @flag: The type of filter to process
3041 * @inode: The inode, usually passed in to your open routine
3042 * @file: The file, usually passed in to your open routine
3043 *
3044 * ftrace_regex_open() initializes the filter files for the
3045 * @ops. Depending on @flag it may process the filter hash or
3046 * the notrace hash of @ops. With this called from the open
3047 * routine, you can use ftrace_filter_write() for the write
3048 * routine if @flag has FTRACE_ITER_FILTER set, or
3049 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3050 * tracing_lseek() should be used as the lseek routine, and
3051 * release must call ftrace_regex_release().
3052 */
3053 int
3054 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3055 struct inode *inode, struct file *file)
3056 {
3057 struct ftrace_iterator *iter;
3058 struct ftrace_hash *hash;
3059 int ret = 0;
3060
3061 ftrace_ops_init(ops);
3062
3063 if (unlikely(ftrace_disabled))
3064 return -ENODEV;
3065
3066 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3067 if (!iter)
3068 return -ENOMEM;
3069
3070 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3071 kfree(iter);
3072 return -ENOMEM;
3073 }
3074
3075 iter->ops = ops;
3076 iter->flags = flag;
3077
3078 mutex_lock(&ops->func_hash->regex_lock);
3079
3080 if (flag & FTRACE_ITER_NOTRACE)
3081 hash = ops->func_hash->notrace_hash;
3082 else
3083 hash = ops->func_hash->filter_hash;
3084
3085 if (file->f_mode & FMODE_WRITE) {
3086 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3087
3088 if (file->f_flags & O_TRUNC)
3089 iter->hash = alloc_ftrace_hash(size_bits);
3090 else
3091 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3092
3093 if (!iter->hash) {
3094 trace_parser_put(&iter->parser);
3095 kfree(iter);
3096 ret = -ENOMEM;
3097 goto out_unlock;
3098 }
3099 }
3100
3101 if (file->f_mode & FMODE_READ) {
3102 iter->pg = ftrace_pages_start;
3103
3104 ret = seq_open(file, &show_ftrace_seq_ops);
3105 if (!ret) {
3106 struct seq_file *m = file->private_data;
3107 m->private = iter;
3108 } else {
3109 /* Failed */
3110 free_ftrace_hash(iter->hash);
3111 trace_parser_put(&iter->parser);
3112 kfree(iter);
3113 }
3114 } else
3115 file->private_data = iter;
3116
3117 out_unlock:
3118 mutex_unlock(&ops->func_hash->regex_lock);
3119
3120 return ret;
3121 }
3122
3123 static int
3124 ftrace_filter_open(struct inode *inode, struct file *file)
3125 {
3126 struct ftrace_ops *ops = inode->i_private;
3127
3128 return ftrace_regex_open(ops,
3129 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
3130 inode, file);
3131 }
3132
3133 static int
3134 ftrace_notrace_open(struct inode *inode, struct file *file)
3135 {
3136 struct ftrace_ops *ops = inode->i_private;
3137
3138 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3139 inode, file);
3140 }
3141
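/*
 * Match @str against @regex according to the glob @type produced by
 * filter_parse_regex(): full match, prefix, substring or suffix.
 */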
3142 static int ftrace_match(char *str, char *regex, int len, int type)
3143 {
3144 int matched = 0;
3145 int slen;
3146
3147 switch (type) {
3148 case MATCH_FULL:
3149 if (strcmp(str, regex) == 0)
3150 matched = 1;
3151 break;
3152 case MATCH_FRONT_ONLY:
3153 if (strncmp(str, regex, len) == 0)
3154 matched = 1;
3155 break;
3156 case MATCH_MIDDLE_ONLY:
3157 if (strstr(str, regex))
3158 matched = 1;
3159 break;
3160 case MATCH_END_ONLY:
3161 slen = strlen(str);
3162 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
3163 matched = 1;
3164 break;
3165 }
3166
3167 return matched;
3168 }
3169
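/*
 * Add rec->ip to @hash, or remove it when @not is set. Returns 0 when
 * there was nothing to do, or the result of add_hash_entry().
 */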
3170 static int
3171 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
3172 {
3173 struct ftrace_func_entry *entry;
3174 int ret = 0;
3175
3176 entry = ftrace_lookup_ip(hash, rec->ip);
3177 if (not) {
3178 /* Do nothing if it doesn't exist */
3179 if (!entry)
3180 return 0;
3181
3182 free_hash_entry(hash, entry);
3183 } else {
3184 /* Do nothing if it exists */
3185 if (entry)
3186 return 0;
3187
3188 ret = add_hash_entry(hash, rec->ip);
3189 }
3190 return ret;
3191 }
3192
3193 static int
3194 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
3195 char *regex, int len, int type)
3196 {
3197 char str[KSYM_SYMBOL_LEN];
3198 char *modname;
3199
3200 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3201
3202 if (mod) {
3203 /* module lookup requires matching the module */
3204 if (!modname || strcmp(modname, mod))
3205 return 0;
3206
3207 /* blank search means to match all funcs in the mod */
3208 if (!len)
3209 return 1;
3210 }
3211
3212 return ftrace_match(str, regex, len, type);
3213 }
3214
3215 static int
3216 match_records(struct ftrace_hash *hash, char *buff,
3217 int len, char *mod, int not)
3218 {
3219 unsigned search_len = 0;
3220 struct ftrace_page *pg;
3221 struct dyn_ftrace *rec;
3222 int type = MATCH_FULL;
3223 char *search = buff;
3224 int found = 0;
3225 int ret;
3226
3227 if (len) {
3228 type = filter_parse_regex(buff, len, &search, &not);
3229 search_len = strlen(search);
3230 }
3231
3232 mutex_lock(&ftrace_lock);
3233
3234 if (unlikely(ftrace_disabled))
3235 goto out_unlock;
3236
3237 do_for_each_ftrace_rec(pg, rec) {
3238 if (ftrace_match_record(rec, mod, search, search_len, type)) {
3239 ret = enter_record(hash, rec, not);
3240 if (ret < 0) {
3241 found = ret;
3242 goto out_unlock;
3243 }
3244 found = 1;
3245 }
3246 } while_for_each_ftrace_rec();
3247 out_unlock:
3248 mutex_unlock(&ftrace_lock);
3249
3250 return found;
3251 }
3252
3253 static int
3254 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3255 {
3256 return match_records(hash, buff, len, NULL, 0);
3257 }
3258
3259 static int
3260 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
3261 {
3262 int not = 0;
3263
3264 /* blank or '*' mean the same */
3265 if (strcmp(buff, "*") == 0)
3266 buff[0] = 0;
3267
3268 /* handle the case of 'dont filter this module' */
3269 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
3270 buff[0] = 0;
3271 not = 1;
3272 }
3273
3274 return match_records(hash, buff, strlen(buff), mod, not);
3275 }
3276
3277 /*
3278 * We register the module command as a template to show others how
3279 * to register a command as well.
3280 */
3281
3282 static int
3283 ftrace_mod_callback(struct ftrace_hash *hash,
3284 char *func, char *cmd, char *param, int enable)
3285 {
3286 char *mod;
3287 int ret = -EINVAL;
3288
3289 /*
3290 * cmd == 'mod' because we only registered this func
3291 * for the 'mod' ftrace_func_command.
3292 * But if you register one func with multiple commands,
3293 * you can tell which command was used by the cmd
3294 * parameter.
3295 */
3296
3297 /* we must have a module name */
3298 if (!param)
3299 return ret;
3300
3301 mod = strsep(&param, ":");
3302 if (!strlen(mod))
3303 return ret;
3304
3305 ret = ftrace_match_module_records(hash, func, mod);
3306 if (!ret)
3307 ret = -EINVAL;
3308 if (ret < 0)
3309 return ret;
3310
3311 return 0;
3312 }
3313
3314 static struct ftrace_func_command ftrace_mod_cmd = {
3315 .name = "mod",
3316 .func = ftrace_mod_callback,
3317 };
3318
3319 static int __init ftrace_mod_cmd_init(void)
3320 {
3321 return register_ftrace_command(&ftrace_mod_cmd);
3322 }
3323 core_initcall(ftrace_mod_cmd_init);
3324
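/*
 * ftrace handler for registered function probes: look up the probes
 * hashed on @ip and invoke each matching probe's callback.
 */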
3325 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3326 struct ftrace_ops *op, struct pt_regs *pt_regs)
3327 {
3328 struct ftrace_func_probe *entry;
3329 struct hlist_head *hhd;
3330 unsigned long key;
3331
3332 key = hash_long(ip, FTRACE_HASH_BITS);
3333
3334 hhd = &ftrace_func_hash[key];
3335
3336 if (hlist_empty(hhd))
3337 return;
3338
3339 /*
3340 * Disable preemption for these calls to prevent an RCU grace
3341 * period. This syncs the hash iteration and freeing of items
3342 * on the hash. rcu_read_lock is too dangerous here.
3343 */
3344 preempt_disable_notrace();
3345 hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3346 if (entry->ip == ip)
3347 entry->ops->func(ip, parent_ip, &entry->data);
3348 }
3349 preempt_enable_notrace();
3350 }
3351
3352 static struct ftrace_ops trace_probe_ops __read_mostly =
3353 {
3354 .func = function_trace_probe_call,
3355 .flags = FTRACE_OPS_FL_INITIALIZED,
3356 INIT_OPS_HASH(trace_probe_ops)
3357 };
3358
3359 static int ftrace_probe_registered;
3360
3361 static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
3362 {
3363 int ret;
3364 int i;
3365
3366 if (ftrace_probe_registered) {
3367 /* still need to update the function call sites */
3368 if (ftrace_enabled)
3369 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3370 old_hash);
3371 return;
3372 }
3373
3374 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3375 struct hlist_head *hhd = &ftrace_func_hash[i];
3376 if (hhd->first)
3377 break;
3378 }
3379 /* Nothing registered? */
3380 if (i == FTRACE_FUNC_HASHSIZE)
3381 return;
3382
3383 ret = ftrace_startup(&trace_probe_ops, 0);
3384
3385 ftrace_probe_registered = 1;
3386 }
3387
3388 static void __disable_ftrace_function_probe(void)
3389 {
3390 int i;
3391
3392 if (!ftrace_probe_registered)
3393 return;
3394
3395 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3396 struct hlist_head *hhd = &ftrace_func_hash[i];
3397 if (hhd->first)
3398 return;
3399 }
3400
3401 /* no more funcs left */
3402 ftrace_shutdown(&trace_probe_ops, 0);
3403
3404 ftrace_probe_registered = 0;
3405 }
3406
3407
3408 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3409 {
3410 if (entry->ops->free)
3411 entry->ops->free(entry->ops, entry->ip, &entry->data);
3412 kfree(entry);
3413 }
3414
3415 int
3416 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3417 void *data)
3418 {
3419 struct ftrace_func_probe *entry;
3420 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3421 struct ftrace_hash *old_hash = *orig_hash;
3422 struct ftrace_hash *hash;
3423 struct ftrace_page *pg;
3424 struct dyn_ftrace *rec;
3425 int type, len, not;
3426 unsigned long key;
3427 int count = 0;
3428 char *search;
3429 int ret;
3430
3431 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3432 len = strlen(search);
3433
3434 /* we do not support '!' for function probes */
3435 if (WARN_ON(not))
3436 return -EINVAL;
3437
3438 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3439
3440 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3441 if (!hash) {
3442 count = -ENOMEM;
3443 goto out;
3444 }
3445
3446 if (unlikely(ftrace_disabled)) {
3447 count = -ENODEV;
3448 goto out;
3449 }
3450
3451 mutex_lock(&ftrace_lock);
3452
3453 do_for_each_ftrace_rec(pg, rec) {
3454
3455 if (!ftrace_match_record(rec, NULL, search, len, type))
3456 continue;
3457
3458 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3459 if (!entry) {
3460 /* If we did not process any, then return error */
3461 if (!count)
3462 count = -ENOMEM;
3463 goto out_unlock;
3464 }
3465
3466 count++;
3467
3468 entry->data = data;
3469
3470 /*
3471 * The caller might want to do something special
3472 * for each function we find. We call the callback
3473 * to give the caller an opportunity to do so.
3474 */
3475 if (ops->init) {
3476 if (ops->init(ops, rec->ip, &entry->data) < 0) {
3477 /* caller does not like this func */
3478 kfree(entry);
3479 continue;
3480 }
3481 }
3482
3483 ret = enter_record(hash, rec, 0);
3484 if (ret < 0) {
3485 kfree(entry);
3486 count = ret;
3487 goto out_unlock;
3488 }
3489
3490 entry->ops = ops;
3491 entry->ip = rec->ip;
3492
3493 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3494 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3495
3496 } while_for_each_ftrace_rec();
3497
3498 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3499
3500 __enable_ftrace_function_probe(old_hash);
3501
3502 if (!ret)
3503 free_ftrace_hash_rcu(old_hash);
3504 else
3505 count = ret;
3506
3507 out_unlock:
3508 mutex_unlock(&ftrace_lock);
3509 out:
3510 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3511 free_ftrace_hash(hash);
3512
3513 return count;
3514 }
3515
3516 enum {
3517 PROBE_TEST_FUNC = 1,
3518 PROBE_TEST_DATA = 2
3519 };
3520
3521 static void
3522 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3523 void *data, int flags)
3524 {
3525 struct ftrace_func_entry *rec_entry;
3526 struct ftrace_func_probe *entry;
3527 struct ftrace_func_probe *p;
3528 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3529 struct ftrace_hash *old_hash = *orig_hash;
3530 struct list_head free_list;
3531 struct ftrace_hash *hash;
3532 struct hlist_node *tmp;
3533 char str[KSYM_SYMBOL_LEN];
3534 int type = MATCH_FULL;
3535 int i, len = 0;
3536 char *search;
3537 int ret;
3538
3539 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3540 glob = NULL;
3541 else if (glob) {
3542 int not;
3543
3544 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3545 len = strlen(search);
3546
3547 /* we do not support '!' for function probes */
3548 if (WARN_ON(not))
3549 return;
3550 }
3551
3552 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3553
3554 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3555 if (!hash)
3556 /* Hmm, should report this somehow */
3557 goto out_unlock;
3558
3559 INIT_LIST_HEAD(&free_list);
3560
3561 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3562 struct hlist_head *hhd = &ftrace_func_hash[i];
3563
3564 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3565
3566 /* break up if statements for readability */
3567 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3568 continue;
3569
3570 if ((flags & PROBE_TEST_DATA) && entry->data != data)
3571 continue;
3572
3573 /* do this last, since it is the most expensive */
3574 if (glob) {
3575 kallsyms_lookup(entry->ip, NULL, NULL,
3576 NULL, str);
3577 if (!ftrace_match(str, glob, len, type))
3578 continue;
3579 }
3580
3581 rec_entry = ftrace_lookup_ip(hash, entry->ip);
3582 /* It is possible more than one entry had this ip */
3583 if (rec_entry)
3584 free_hash_entry(hash, rec_entry);
3585
3586 hlist_del_rcu(&entry->node);
3587 list_add(&entry->free_list, &free_list);
3588 }
3589 }
3590 mutex_lock(&ftrace_lock);
3591 __disable_ftrace_function_probe();
3592 /*
3593 * Remove after the disable is called. Otherwise, if the last
3594 * probe is removed, a null hash means *all enabled*.
3595 */
3596 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3597 synchronize_sched();
3598 if (!ret)
3599 free_ftrace_hash_rcu(old_hash);
3600
3601 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3602 list_del(&entry->free_list);
3603 ftrace_free_entry(entry);
3604 }
3605 mutex_unlock(&ftrace_lock);
3606
3607 out_unlock:
3608 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3609 free_ftrace_hash(hash);
3610 }
3611
3612 void
3613 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3614 void *data)
3615 {
3616 __unregister_ftrace_function_probe(glob, ops, data,
3617 PROBE_TEST_FUNC | PROBE_TEST_DATA);
3618 }
3619
3620 void
3621 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3622 {
3623 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3624 }
3625
3626 void unregister_ftrace_function_probe_all(char *glob)
3627 {
3628 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3629 }
3630
3631 static LIST_HEAD(ftrace_commands);
3632 static DEFINE_MUTEX(ftrace_cmd_mutex);
3633
3634 /*
3635 * Currently we only register ftrace commands from __init, so mark this
3636 * __init too.
3637 */
3638 __init int register_ftrace_command(struct ftrace_func_command *cmd)
3639 {
3640 struct ftrace_func_command *p;
3641 int ret = 0;
3642
3643 mutex_lock(&ftrace_cmd_mutex);
3644 list_for_each_entry(p, &ftrace_commands, list) {
3645 if (strcmp(cmd->name, p->name) == 0) {
3646 ret = -EBUSY;
3647 goto out_unlock;
3648 }
3649 }
3650 list_add(&cmd->list, &ftrace_commands);
3651 out_unlock:
3652 mutex_unlock(&ftrace_cmd_mutex);
3653
3654 return ret;
3655 }
3656
3657 /*
3658 * Currently we only unregister ftrace commands from __init, so mark
3659 * this __init too.
3660 */
3661 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3662 {
3663 struct ftrace_func_command *p, *n;
3664 int ret = -ENODEV;
3665
3666 mutex_lock(&ftrace_cmd_mutex);
3667 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3668 if (strcmp(cmd->name, p->name) == 0) {
3669 ret = 0;
3670 list_del_init(&p->list);
3671 goto out_unlock;
3672 }
3673 }
3674 out_unlock:
3675 mutex_unlock(&ftrace_cmd_mutex);
3676
3677 return ret;
3678 }
3679
3680 static int ftrace_process_regex(struct ftrace_hash *hash,
3681 char *buff, int len, int enable)
3682 {
3683 char *func, *command, *next = buff;
3684 struct ftrace_func_command *p;
3685 int ret = -EINVAL;
3686
3687 func = strsep(&next, ":");
3688
3689 if (!next) {
3690 ret = ftrace_match_records(hash, func, len);
3691 if (!ret)
3692 ret = -EINVAL;
3693 if (ret < 0)
3694 return ret;
3695 return 0;
3696 }
3697
3698 /* command found */
3699
3700 command = strsep(&next, ":");
3701
3702 mutex_lock(&ftrace_cmd_mutex);
3703 list_for_each_entry(p, &ftrace_commands, list) {
3704 if (strcmp(p->name, command) == 0) {
3705 ret = p->func(hash, func, command, next, enable);
3706 goto out_unlock;
3707 }
3708 }
3709 out_unlock:
3710 mutex_unlock(&ftrace_cmd_mutex);
3711
3712 return ret;
3713 }
3714
3715 static ssize_t
3716 ftrace_regex_write(struct file *file, const char __user *ubuf,
3717 size_t cnt, loff_t *ppos, int enable)
3718 {
3719 struct ftrace_iterator *iter;
3720 struct trace_parser *parser;
3721 ssize_t ret, read;
3722
3723 if (!cnt)
3724 return 0;
3725
3726 if (file->f_mode & FMODE_READ) {
3727 struct seq_file *m = file->private_data;
3728 iter = m->private;
3729 } else
3730 iter = file->private_data;
3731
3732 if (unlikely(ftrace_disabled))
3733 return -ENODEV;
3734
3735 /* iter->hash is a local copy, so we don't need regex_lock */
3736
3737 parser = &iter->parser;
3738 read = trace_get_user(parser, ubuf, cnt, ppos);
3739
3740 if (read >= 0 && trace_parser_loaded(parser) &&
3741 !trace_parser_cont(parser)) {
3742 ret = ftrace_process_regex(iter->hash, parser->buffer,
3743 parser->idx, enable);
3744 trace_parser_clear(parser);
3745 if (ret < 0)
3746 goto out;
3747 }
3748
3749 ret = read;
3750 out:
3751 return ret;
3752 }
3753
3754 ssize_t
3755 ftrace_filter_write(struct file *file, const char __user *ubuf,
3756 size_t cnt, loff_t *ppos)
3757 {
3758 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3759 }
3760
3761 ssize_t
3762 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3763 size_t cnt, loff_t *ppos)
3764 {
3765 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3766 }
3767
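/*
 * Add or remove a single instruction pointer in @hash. @ip must be a
 * known ftrace location or -EINVAL is returned.
 */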
3768 static int
3769 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3770 {
3771 struct ftrace_func_entry *entry;
3772
3773 if (!ftrace_location(ip))
3774 return -EINVAL;
3775
3776 if (remove) {
3777 entry = ftrace_lookup_ip(hash, ip);
3778 if (!entry)
3779 return -ENOENT;
3780 free_hash_entry(hash, entry);
3781 return 0;
3782 }
3783
3784 return add_hash_entry(hash, ip);
3785 }
3786
3787 static void ftrace_ops_update_code(struct ftrace_ops *ops,
3788 struct ftrace_hash *old_hash)
3789 {
3790 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3791 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3792 }
3793
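/*
 * Build a new filter (or notrace) hash for @ops from @buf and/or @ip,
 * then swap it in under ftrace_lock and update the traced call sites.
 * @reset starts from an empty hash instead of copying the current one.
 */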
3794 static int
3795 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3796 unsigned long ip, int remove, int reset, int enable)
3797 {
3798 struct ftrace_hash **orig_hash;
3799 struct ftrace_hash *old_hash;
3800 struct ftrace_hash *hash;
3801 int ret;
3802
3803 if (unlikely(ftrace_disabled))
3804 return -ENODEV;
3805
3806 mutex_lock(&ops->func_hash->regex_lock);
3807
3808 if (enable)
3809 orig_hash = &ops->func_hash->filter_hash;
3810 else
3811 orig_hash = &ops->func_hash->notrace_hash;
3812
3813 if (reset)
3814 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
3815 else
3816 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3817
3818 if (!hash) {
3819 ret = -ENOMEM;
3820 goto out_regex_unlock;
3821 }
3822
3823 if (buf && !ftrace_match_records(hash, buf, len)) {
3824 ret = -EINVAL;
3825 goto out_regex_unlock;
3826 }
3827 if (ip) {
3828 ret = ftrace_match_addr(hash, ip, remove);
3829 if (ret < 0)
3830 goto out_regex_unlock;
3831 }
3832
3833 mutex_lock(&ftrace_lock);
3834 old_hash = *orig_hash;
3835 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3836 if (!ret) {
3837 ftrace_ops_update_code(ops, old_hash);
3838 free_ftrace_hash_rcu(old_hash);
3839 }
3840 mutex_unlock(&ftrace_lock);
3841
3842 out_regex_unlock:
3843 mutex_unlock(&ops->func_hash->regex_lock);
3844
3845 free_ftrace_hash(hash);
3846 return ret;
3847 }
3848
3849 static int
3850 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3851 int reset, int enable)
3852 {
3853 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3854 }
3855
3856 /**
3857 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3858 * @ops - the ops to set the filter with
3859 * @ip - the address to add to or remove from the filter.
3860 * @remove - non zero to remove the ip from the filter
3861 * @reset - non zero to reset all filters before applying this filter.
3862 *
3863 * Filters denote which functions should be enabled when tracing is enabled.
3864 * If @ip is NULL, it fails to update the filter.
3865 */
3866 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3867 int remove, int reset)
3868 {
3869 ftrace_ops_init(ops);
3870 return ftrace_set_addr(ops, ip, remove, reset, 1);
3871 }
3872 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
3873
3874 static int
3875 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3876 int reset, int enable)
3877 {
3878 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3879 }
3880
3881 /**
3882 * ftrace_set_filter - set a function to filter on in ftrace
3883 * @ops - the ops to set the filter with
3884 * @buf - the string that holds the function filter text.
3885 * @len - the length of the string.
3886 * @reset - non zero to reset all filters before applying this filter.
3887 *
3888 * Filters denote which functions should be enabled when tracing is enabled.
3889 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3890 */
3891 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3892 int len, int reset)
3893 {
3894 ftrace_ops_init(ops);
3895 return ftrace_set_regex(ops, buf, len, reset, 1);
3896 }
3897 EXPORT_SYMBOL_GPL(ftrace_set_filter);
3898
3899 /**
3900 * ftrace_set_notrace - set a function to not trace in ftrace
3901 * @ops - the ops to set the notrace filter with
3902 * @buf - the string that holds the function notrace text.
3903 * @len - the length of the string.
3904 * @reset - non zero to reset all filters before applying this filter.
3905 *
3906 * Notrace Filters denote which functions should not be enabled when tracing
3907 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3908 * for tracing.
3909 */
3910 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3911 int len, int reset)
3912 {
3913 ftrace_ops_init(ops);
3914 return ftrace_set_regex(ops, buf, len, reset, 0);
3915 }
3916 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3917 /**
3918 * ftrace_set_global_filter - set a function to filter on with global tracers
3919 * @buf - the string that holds the function filter text.
3920 * @len - the length of the string.
3921 * @reset - non zero to reset all filters before applying this filter.
3922 *
3923 * Filters denote which functions should be enabled when tracing is enabled.
3924 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3925 */
3926 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3927 {
3928 ftrace_set_regex(&global_ops, buf, len, reset, 1);
3929 }
3930 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3931
3932 /**
3933 * ftrace_set_global_notrace - set a function to not trace with global tracers
3934 * @buf - the string that holds the function notrace text.
3935 * @len - the length of the string.
3936 * @reset - non zero to reset all filters before applying this filter.
3937 *
3938 * Notrace Filters denote which functions should not be enabled when tracing
3939 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3940 * for tracing.
3941 */
3942 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3943 {
3944 ftrace_set_regex(&global_ops, buf, len, reset, 0);
3945 }
3946 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3947
3948 /*
3949 * command line interface to allow users to set filters on boot up.
3950 */
3951 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3952 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3953 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3954
3955 /* Used by function selftest to not test if filter is set */
3956 bool ftrace_filter_param __initdata;
3957
3958 static int __init set_ftrace_notrace(char *str)
3959 {
3960 ftrace_filter_param = true;
3961 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3962 return 1;
3963 }
3964 __setup("ftrace_notrace=", set_ftrace_notrace);
3965
3966 static int __init set_ftrace_filter(char *str)
3967 {
3968 ftrace_filter_param = true;
3969 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3970 return 1;
3971 }
3972 __setup("ftrace_filter=", set_ftrace_filter);
3973
3974 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3975 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3976 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3977 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
3978
3979 static unsigned long save_global_trampoline;
3980 static unsigned long save_global_flags;
3981
3982 static int __init set_graph_function(char *str)
3983 {
3984 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3985 return 1;
3986 }
3987 __setup("ftrace_graph_filter=", set_graph_function);
3988
3989 static int __init set_graph_notrace_function(char *str)
3990 {
3991 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
3992 return 1;
3993 }
3994 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
3995
3996 static void __init set_ftrace_early_graph(char *buf, int enable)
3997 {
3998 int ret;
3999 char *func;
4000 unsigned long *table = ftrace_graph_funcs;
4001 int *count = &ftrace_graph_count;
4002
4003 if (!enable) {
4004 table = ftrace_graph_notrace_funcs;
4005 count = &ftrace_graph_notrace_count;
4006 }
4007
4008 while (buf) {
4009 func = strsep(&buf, ",");
4010 /* we allow only one expression at a time */
4011 ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
4012 if (ret)
4013 printk(KERN_DEBUG "ftrace: function %s not "
4014 "traceable\n", func);
4015 }
4016 }
4017 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4018
4019 void __init
4020 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
4021 {
4022 char *func;
4023
4024 ftrace_ops_init(ops);
4025
4026 while (buf) {
4027 func = strsep(&buf, ",");
4028 ftrace_set_regex(ops, func, strlen(func), 0, enable);
4029 }
4030 }
4031
4032 static void __init set_ftrace_early_filters(void)
4033 {
4034 if (ftrace_filter_buf[0])
4035 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
4036 if (ftrace_notrace_buf[0])
4037 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
4038 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4039 if (ftrace_graph_buf[0])
4040 set_ftrace_early_graph(ftrace_graph_buf, 1);
4041 if (ftrace_graph_notrace_buf[0])
4042 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
4043 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4044 }
4045
4046 int ftrace_regex_release(struct inode *inode, struct file *file)
4047 {
4048 struct seq_file *m = (struct seq_file *)file->private_data;
4049 struct ftrace_iterator *iter;
4050 struct ftrace_hash **orig_hash;
4051 struct ftrace_hash *old_hash;
4052 struct trace_parser *parser;
4053 int filter_hash;
4054 int ret;
4055
4056 if (file->f_mode & FMODE_READ) {
4057 iter = m->private;
4058 seq_release(inode, file);
4059 } else
4060 iter = file->private_data;
4061
4062 parser = &iter->parser;
4063 if (trace_parser_loaded(parser)) {
4064 parser->buffer[parser->idx] = 0;
4065 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
4066 }
4067
4068 trace_parser_put(parser);
4069
4070 mutex_lock(&iter->ops->func_hash->regex_lock);
4071
4072 if (file->f_mode & FMODE_WRITE) {
4073 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4074
4075 if (filter_hash)
4076 orig_hash = &iter->ops->func_hash->filter_hash;
4077 else
4078 orig_hash = &iter->ops->func_hash->notrace_hash;
4079
4080 mutex_lock(&ftrace_lock);
4081 old_hash = *orig_hash;
4082 ret = ftrace_hash_move(iter->ops, filter_hash,
4083 orig_hash, iter->hash);
4084 if (!ret) {
4085 ftrace_ops_update_code(iter->ops, old_hash);
4086 free_ftrace_hash_rcu(old_hash);
4087 }
4088 mutex_unlock(&ftrace_lock);
4089 }
4090
4091 mutex_unlock(&iter->ops->func_hash->regex_lock);
4092 free_ftrace_hash(iter->hash);
4093 kfree(iter);
4094
4095 return 0;
4096 }
4097
4098 static const struct file_operations ftrace_avail_fops = {
4099 .open = ftrace_avail_open,
4100 .read = seq_read,
4101 .llseek = seq_lseek,
4102 .release = seq_release_private,
4103 };
4104
4105 static const struct file_operations ftrace_enabled_fops = {
4106 .open = ftrace_enabled_open,
4107 .read = seq_read,
4108 .llseek = seq_lseek,
4109 .release = seq_release_private,
4110 };
4111
4112 static const struct file_operations ftrace_filter_fops = {
4113 .open = ftrace_filter_open,
4114 .read = seq_read,
4115 .write = ftrace_filter_write,
4116 .llseek = tracing_lseek,
4117 .release = ftrace_regex_release,
4118 };
4119
4120 static const struct file_operations ftrace_notrace_fops = {
4121 .open = ftrace_notrace_open,
4122 .read = seq_read,
4123 .write = ftrace_notrace_write,
4124 .llseek = tracing_lseek,
4125 .release = ftrace_regex_release,
4126 };
4127
4128 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4129
4130 static DEFINE_MUTEX(graph_lock);
4131
4132 int ftrace_graph_count;
4133 int ftrace_graph_notrace_count;
4134 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4135 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4136
4137 struct ftrace_graph_data {
4138 unsigned long *table;
4139 size_t size;
4140 int *count;
4141 const struct seq_operations *seq_ops;
4142 };
4143
4144 static void *
4145 __g_next(struct seq_file *m, loff_t *pos)
4146 {
4147 struct ftrace_graph_data *fgd = m->private;
4148
4149 if (*pos >= *fgd->count)
4150 return NULL;
4151 return &fgd->table[*pos];
4152 }
4153
4154 static void *
4155 g_next(struct seq_file *m, void *v, loff_t *pos)
4156 {
4157 (*pos)++;
4158 return __g_next(m, pos);
4159 }
4160
4161 static void *g_start(struct seq_file *m, loff_t *pos)
4162 {
4163 struct ftrace_graph_data *fgd = m->private;
4164
4165 mutex_lock(&graph_lock);
4166
4167 /* Nothing, tell g_show to print all functions are enabled */
4168 if (!*fgd->count && !*pos)
4169 return (void *)1;
4170
4171 return __g_next(m, pos);
4172 }
4173
4174 static void g_stop(struct seq_file *m, void *p)
4175 {
4176 mutex_unlock(&graph_lock);
4177 }
4178
4179 static int g_show(struct seq_file *m, void *v)
4180 {
4181 unsigned long *ptr = v;
4182
4183 if (!ptr)
4184 return 0;
4185
4186 if (ptr == (unsigned long *)1) {
4187 struct ftrace_graph_data *fgd = m->private;
4188
4189 if (fgd->table == ftrace_graph_funcs)
4190 seq_printf(m, "#### all functions enabled ####\n");
4191 else
4192 seq_printf(m, "#### no functions disabled ####\n");
4193 return 0;
4194 }
4195
4196 seq_printf(m, "%ps\n", (void *)*ptr);
4197
4198 return 0;
4199 }
4200
4201 static const struct seq_operations ftrace_graph_seq_ops = {
4202 .start = g_start,
4203 .next = g_next,
4204 .stop = g_stop,
4205 .show = g_show,
4206 };
4207
4208 static int
4209 __ftrace_graph_open(struct inode *inode, struct file *file,
4210 struct ftrace_graph_data *fgd)
4211 {
4212 int ret = 0;
4213
4214 mutex_lock(&graph_lock);
4215 if ((file->f_mode & FMODE_WRITE) &&
4216 (file->f_flags & O_TRUNC)) {
4217 *fgd->count = 0;
4218 memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
4219 }
4220 mutex_unlock(&graph_lock);
4221
4222 if (file->f_mode & FMODE_READ) {
4223 ret = seq_open(file, fgd->seq_ops);
4224 if (!ret) {
4225 struct seq_file *m = file->private_data;
4226 m->private = fgd;
4227 }
4228 } else
4229 file->private_data = fgd;
4230
4231 return ret;
4232 }
4233
4234 static int
4235 ftrace_graph_open(struct inode *inode, struct file *file)
4236 {
4237 struct ftrace_graph_data *fgd;
4238
4239 if (unlikely(ftrace_disabled))
4240 return -ENODEV;
4241
4242 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4243 if (fgd == NULL)
4244 return -ENOMEM;
4245
4246 fgd->table = ftrace_graph_funcs;
4247 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4248 fgd->count = &ftrace_graph_count;
4249 fgd->seq_ops = &ftrace_graph_seq_ops;
4250
4251 return __ftrace_graph_open(inode, file, fgd);
4252 }
4253
4254 static int
4255 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4256 {
4257 struct ftrace_graph_data *fgd;
4258
4259 if (unlikely(ftrace_disabled))
4260 return -ENODEV;
4261
4262 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4263 if (fgd == NULL)
4264 return -ENOMEM;
4265
4266 fgd->table = ftrace_graph_notrace_funcs;
4267 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4268 fgd->count = &ftrace_graph_notrace_count;
4269 fgd->seq_ops = &ftrace_graph_seq_ops;
4270
4271 return __ftrace_graph_open(inode, file, fgd);
4272 }
4273
4274 static int
4275 ftrace_graph_release(struct inode *inode, struct file *file)
4276 {
4277 if (file->f_mode & FMODE_READ) {
4278 struct seq_file *m = file->private_data;
4279
4280 kfree(m->private);
4281 seq_release(inode, file);
4282 } else {
4283 kfree(file->private_data);
4284 }
4285
4286 return 0;
4287 }
4288
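/*
* Resolve a filter expression into function addresses and add them to
* (or, for a '!' prefixed pattern, remove them from) the given graph
* filter array. Returns -EBUSY if the array is already full and -EINVAL
* if nothing matched.
*/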
4289 static int
4290 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
4291 {
4292 struct dyn_ftrace *rec;
4293 struct ftrace_page *pg;
4294 int search_len;
4295 int fail = 1;
4296 int type, not;
4297 char *search;
4298 bool exists;
4299 int i;
4300
4301 /* decode regex */
4302 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
4303 if (!not && *idx >= size)
4304 return -EBUSY;
4305
4306 search_len = strlen(search);
4307
4308 mutex_lock(&ftrace_lock);
4309
4310 if (unlikely(ftrace_disabled)) {
4311 mutex_unlock(&ftrace_lock);
4312 return -ENODEV;
4313 }
4314
4315 do_for_each_ftrace_rec(pg, rec) {
4316
4317 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
4318 /* if it is in the array */
4319 exists = false;
4320 for (i = 0; i < *idx; i++) {
4321 if (array[i] == rec->ip) {
4322 exists = true;
4323 break;
4324 }
4325 }
4326
4327 if (!not) {
4328 fail = 0;
4329 if (!exists) {
4330 array[(*idx)++] = rec->ip;
4331 if (*idx >= size)
4332 goto out;
4333 }
4334 } else {
4335 if (exists) {
4336 array[i] = array[--(*idx)];
4337 array[*idx] = 0;
4338 fail = 0;
4339 }
4340 }
4341 }
4342 } while_for_each_ftrace_rec();
4343 out:
4344 mutex_unlock(&ftrace_lock);
4345
4346 if (fail)
4347 return -EINVAL;
4348
4349 return 0;
4350 }
4351
4352 static ssize_t
4353 ftrace_graph_write(struct file *file, const char __user *ubuf,
4354 size_t cnt, loff_t *ppos)
4355 {
4356 struct trace_parser parser;
4357 ssize_t read, ret = 0;
4358 struct ftrace_graph_data *fgd = file->private_data;
4359
4360 if (!cnt)
4361 return 0;
4362
4363 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4364 return -ENOMEM;
4365
4366 read = trace_get_user(&parser, ubuf, cnt, ppos);
4367
4368 if (read >= 0 && trace_parser_loaded(&parser)) {
4369 parser.buffer[parser.idx] = 0;
4370
4371 mutex_lock(&graph_lock);
4372
4373 /* we allow only one expression at a time */
4374 ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4375 parser.buffer);
4376
4377 mutex_unlock(&graph_lock);
4378 }
4379
4380 if (!ret)
4381 ret = read;
4382
4383 trace_parser_put(&parser);
4384
4385 return ret;
4386 }
4387
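/*
* Usage sketch (illustrative): writing a glob such as "sys_*" into
* set_graph_function adds every matching function to the graph filter,
* while prefixing the pattern with '!' removes matching entries again.
*/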
4388 static const struct file_operations ftrace_graph_fops = {
4389 .open = ftrace_graph_open,
4390 .read = seq_read,
4391 .write = ftrace_graph_write,
4392 .llseek = tracing_lseek,
4393 .release = ftrace_graph_release,
4394 };
4395
4396 static const struct file_operations ftrace_graph_notrace_fops = {
4397 .open = ftrace_graph_notrace_open,
4398 .read = seq_read,
4399 .write = ftrace_graph_write,
4400 .llseek = tracing_lseek,
4401 .release = ftrace_graph_release,
4402 };
4403 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4404
4405 void ftrace_create_filter_files(struct ftrace_ops *ops,
4406 struct dentry *parent)
4407 {
4408
4409 trace_create_file("set_ftrace_filter", 0644, parent,
4410 ops, &ftrace_filter_fops);
4411
4412 trace_create_file("set_ftrace_notrace", 0644, parent,
4413 ops, &ftrace_notrace_fops);
4414 }
4415
4416 /*
4417 * The name "destroy_filter_files" is really a misnomer. Although
4418 * in the future it may actually delete the files, for now it is
4419 * really intended to make sure the ops passed in are disabled
4420 * and that when this function returns, the caller is free to
4421 * free the ops.
4422 *
4423 * The "destroy" name is only to match the "create" name that this
4424 * should be paired with.
4425 */
4426 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4427 {
4428 mutex_lock(&ftrace_lock);
4429 if (ops->flags & FTRACE_OPS_FL_ENABLED)
4430 ftrace_shutdown(ops, 0);
4431 ops->flags |= FTRACE_OPS_FL_DELETED;
4432 mutex_unlock(&ftrace_lock);
4433 }
4434
4435 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
4436 {
4437
4438 trace_create_file("available_filter_functions", 0444,
4439 d_tracer, NULL, &ftrace_avail_fops);
4440
4441 trace_create_file("enabled_functions", 0444,
4442 d_tracer, NULL, &ftrace_enabled_fops);
4443
4444 ftrace_create_filter_files(&global_ops, d_tracer);
4445
4446 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4447 trace_create_file("set_graph_function", 0444, d_tracer,
4448 NULL,
4449 &ftrace_graph_fops);
4450 trace_create_file("set_graph_notrace", 0444, d_tracer,
4451 NULL,
4452 &ftrace_graph_notrace_fops);
4453 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4454
4455 return 0;
4456 }
4457
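/*
* Comparison and swap callbacks handed to sort() in ftrace_process_locs()
* so that the mcount locations recorded at build time are ordered by
* address before being packed into ftrace pages.
*/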
4458 static int ftrace_cmp_ips(const void *a, const void *b)
4459 {
4460 const unsigned long *ipa = a;
4461 const unsigned long *ipb = b;
4462
4463 if (*ipa > *ipb)
4464 return 1;
4465 if (*ipa < *ipb)
4466 return -1;
4467 return 0;
4468 }
4469
4470 static void ftrace_swap_ips(void *a, void *b, int size)
4471 {
4472 unsigned long *ipa = a;
4473 unsigned long *ipb = b;
4474 unsigned long t;
4475
4476 t = *ipa;
4477 *ipa = *ipb;
4478 *ipb = t;
4479 }
4480
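/*
* Take the array of mcount call sites between start and end (the
* __mcount_loc section of the kernel or of a module), sort it, copy the
* adjusted addresses into freshly allocated ftrace pages, and hand the
* new records to ftrace_update_code() to be turned into nops.
*/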
4481 static int ftrace_process_locs(struct module *mod,
4482 unsigned long *start,
4483 unsigned long *end)
4484 {
4485 struct ftrace_page *start_pg;
4486 struct ftrace_page *pg;
4487 struct dyn_ftrace *rec;
4488 unsigned long count;
4489 unsigned long *p;
4490 unsigned long addr;
4491 unsigned long flags = 0; /* Shut up gcc */
4492 int ret = -ENOMEM;
4493
4494 count = end - start;
4495
4496 if (!count)
4497 return 0;
4498
4499 sort(start, count, sizeof(*start),
4500 ftrace_cmp_ips, ftrace_swap_ips);
4501
4502 start_pg = ftrace_allocate_pages(count);
4503 if (!start_pg)
4504 return -ENOMEM;
4505
4506 mutex_lock(&ftrace_lock);
4507
4508 /*
4509 * The core kernel and each module need their own pages, as
4510 * modules will free them when they are removed.
4511 * Force a new page to be allocated for modules.
4512 */
4513 if (!mod) {
4514 WARN_ON(ftrace_pages || ftrace_pages_start);
4515 /* First initialization */
4516 ftrace_pages = ftrace_pages_start = start_pg;
4517 } else {
4518 if (!ftrace_pages)
4519 goto out;
4520
4521 if (WARN_ON(ftrace_pages->next)) {
4522 /* Hmm, we have free pages? */
4523 while (ftrace_pages->next)
4524 ftrace_pages = ftrace_pages->next;
4525 }
4526
4527 ftrace_pages->next = start_pg;
4528 }
4529
4530 p = start;
4531 pg = start_pg;
4532 while (p < end) {
4533 addr = ftrace_call_adjust(*p++);
4534 /*
4535 * Some architecture linkers will pad between
4536 * the different mcount_loc sections of different
4537 * object files to satisfy alignments.
4538 * Skip any NULL pointers.
4539 */
4540 if (!addr)
4541 continue;
4542
4543 if (pg->index == pg->size) {
4544 /* We should have allocated enough */
4545 if (WARN_ON(!pg->next))
4546 break;
4547 pg = pg->next;
4548 }
4549
4550 rec = &pg->records[pg->index++];
4551 rec->ip = addr;
4552 }
4553
4554 /* We should have used all pages */
4555 WARN_ON(pg->next);
4556
4557 /* Assign the last page to ftrace_pages */
4558 ftrace_pages = pg;
4559
4560 /*
4561 * We only need to disable interrupts on start up
4562 * because we are modifying code that an interrupt
4563 * may execute, and the modification is not atomic.
4564 * But for modules, nothing runs the code we modify
4565 * until we are finished with it, and there's no
4566 * reason to cause large interrupt latencies while we do it.
4567 */
4568 if (!mod)
4569 local_irq_save(flags);
4570 ftrace_update_code(mod, start_pg);
4571 if (!mod)
4572 local_irq_restore(flags);
4573 ret = 0;
4574 out:
4575 mutex_unlock(&ftrace_lock);
4576
4577 return ret;
4578 }
4579
4580 #ifdef CONFIG_MODULES
4581
4582 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4583
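/*
* A module is being unloaded: find the ftrace pages holding its records
* and free them so that no stale mcount entries are left behind.
*/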
4584 void ftrace_release_mod(struct module *mod)
4585 {
4586 struct dyn_ftrace *rec;
4587 struct ftrace_page **last_pg;
4588 struct ftrace_page *pg;
4589 int order;
4590
4591 mutex_lock(&ftrace_lock);
4592
4593 if (ftrace_disabled)
4594 goto out_unlock;
4595
4596 /*
4597 * Each module has its own ftrace_pages, remove
4598 * them from the list.
4599 */
4600 last_pg = &ftrace_pages_start;
4601 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4602 rec = &pg->records[0];
4603 if (within_module_core(rec->ip, mod)) {
4604 /*
4605 * As core pages are first, the first
4606 * page should never be a module page.
4607 */
4608 if (WARN_ON(pg == ftrace_pages_start))
4609 goto out_unlock;
4610
4611 /* Check if we are deleting the last page */
4612 if (pg == ftrace_pages)
4613 ftrace_pages = next_to_ftrace_page(last_pg);
4614
4615 *last_pg = pg->next;
4616 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4617 free_pages((unsigned long)pg->records, order);
4618 kfree(pg);
4619 } else
4620 last_pg = &pg->next;
4621 }
4622 out_unlock:
4623 mutex_unlock(&ftrace_lock);
4624 }
4625
4626 static void ftrace_init_module(struct module *mod,
4627 unsigned long *start, unsigned long *end)
4628 {
4629 if (ftrace_disabled || start == end)
4630 return;
4631 ftrace_process_locs(mod, start, end);
4632 }
4633
4634 void ftrace_module_init(struct module *mod)
4635 {
4636 ftrace_init_module(mod, mod->ftrace_callsites,
4637 mod->ftrace_callsites +
4638 mod->num_ftrace_callsites);
4639 }
4640
4641 static int ftrace_module_notify_exit(struct notifier_block *self,
4642 unsigned long val, void *data)
4643 {
4644 struct module *mod = data;
4645
4646 if (val == MODULE_STATE_GOING)
4647 ftrace_release_mod(mod);
4648
4649 return 0;
4650 }
4651 #else
4652 static int ftrace_module_notify_exit(struct notifier_block *self,
4653 unsigned long val, void *data)
4654 {
4655 return 0;
4656 }
4657 #endif /* CONFIG_MODULES */
4658
4659 struct notifier_block ftrace_module_exit_nb = {
4660 .notifier_call = ftrace_module_notify_exit,
4661 .priority = INT_MIN, /* Run after anything that can remove kprobes */
4662 };
4663
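/*
* Early boot initialization of dynamic ftrace: let the architecture do
* its setup, convert every mcount call site recorded in __mcount_loc
* into a nop, and register the module exit notifier.
*/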
4664 void __init ftrace_init(void)
4665 {
4666 extern unsigned long __start_mcount_loc[];
4667 extern unsigned long __stop_mcount_loc[];
4668 unsigned long count, flags;
4669 int ret;
4670
4671 local_irq_save(flags);
4672 ret = ftrace_dyn_arch_init();
4673 local_irq_restore(flags);
4674 if (ret)
4675 goto failed;
4676
4677 count = __stop_mcount_loc - __start_mcount_loc;
4678 if (!count) {
4679 pr_info("ftrace: No functions to be traced?\n");
4680 goto failed;
4681 }
4682
4683 pr_info("ftrace: allocating %ld entries in %ld pages\n",
4684 count, count / ENTRIES_PER_PAGE + 1);
4685
4686 last_ftrace_enabled = ftrace_enabled = 1;
4687
4688 ret = ftrace_process_locs(NULL,
4689 __start_mcount_loc,
4690 __stop_mcount_loc);
4691
4692 ret = register_module_notifier(&ftrace_module_exit_nb);
4693 if (ret)
4694 pr_warning("Failed to register ftrace module exit notifier\n");
4695
4696 set_ftrace_early_filters();
4697
4698 return;
4699 failed:
4700 ftrace_disabled = 1;
4701 }
4702
4703 /* Do nothing if arch does not support this */
4704 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
4705 {
4706 }
4707
4708 static void ftrace_update_trampoline(struct ftrace_ops *ops)
4709 {
4710 /* Currently, only non-dynamic ops can have a trampoline */
4711 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
4712 return;
4713
4714 arch_ftrace_update_trampoline(ops);
4715 }
4716
4717 #else
4718
4719 static struct ftrace_ops global_ops = {
4720 .func = ftrace_stub,
4721 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4722 };
4723
4724 static int __init ftrace_nodyn_init(void)
4725 {
4726 ftrace_enabled = 1;
4727 return 0;
4728 }
4729 core_initcall(ftrace_nodyn_init);
4730
4731 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4732 static inline void ftrace_startup_enable(int command) { }
4733 static inline void ftrace_startup_all(int command) { }
4734 /* Keep as macros so we do not need to define the commands */
4735 # define ftrace_startup(ops, command) \
4736 ({ \
4737 int ___ret = __register_ftrace_function(ops); \
4738 if (!___ret) \
4739 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4740 ___ret; \
4741 })
4742 # define ftrace_shutdown(ops, command) \
4743 ({ \
4744 int ___ret = __unregister_ftrace_function(ops); \
4745 if (!___ret) \
4746 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
4747 ___ret; \
4748 })
4749
4750 # define ftrace_startup_sysctl() do { } while (0)
4751 # define ftrace_shutdown_sysctl() do { } while (0)
4752
4753 static inline int
4754 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4755 {
4756 return 1;
4757 }
4758
4759 static void ftrace_update_trampoline(struct ftrace_ops *ops)
4760 {
4761 }
4762
4763 #endif /* CONFIG_DYNAMIC_FTRACE */
4764
4765 __init void ftrace_init_global_array_ops(struct trace_array *tr)
4766 {
4767 tr->ops = &global_ops;
4768 tr->ops->private = tr;
4769 }
4770
4771 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
4772 {
4773 /* If we filter on pids, update to use the pid function */
4774 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4775 if (WARN_ON(tr->ops->func != ftrace_stub))
4776 printk("ftrace ops had %pS for function\n",
4777 tr->ops->func);
4778 /* Only the top level instance does pid tracing */
4779 if (!list_empty(&ftrace_pids)) {
4780 set_ftrace_pid_function(func);
4781 func = ftrace_pid_func;
4782 }
4783 }
4784 tr->ops->func = func;
4785 tr->ops->private = tr;
4786 }
4787
4788 void ftrace_reset_array_ops(struct trace_array *tr)
4789 {
4790 tr->ops->func = ftrace_stub;
4791 }
4792
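/*
* Handler for "control" ops (currently used by perf): these ops can be
* disabled per CPU, so each one is additionally checked with
* ftrace_function_local_disabled() before its callback is invoked.
*/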
4793 static void
4794 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4795 struct ftrace_ops *op, struct pt_regs *regs)
4796 {
4797 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4798 return;
4799
4800 /*
4801 * Some of the ops may be dynamically allocated,
4802 * they must be freed after a synchronize_sched().
4803 */
4804 preempt_disable_notrace();
4805 trace_recursion_set(TRACE_CONTROL_BIT);
4806
4807 /*
4808 * Control funcs (perf) use RCU. Only trace if
4809 * RCU is currently active.
4810 */
4811 if (!rcu_is_watching())
4812 goto out;
4813
4814 do_for_each_ftrace_op(op, ftrace_control_list) {
4815 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4816 !ftrace_function_local_disabled(op) &&
4817 ftrace_ops_test(op, ip, regs))
4818 op->func(ip, parent_ip, op, regs);
4819 } while_for_each_ftrace_op(op);
4820 out:
4821 trace_recursion_clear(TRACE_CONTROL_BIT);
4822 preempt_enable_notrace();
4823 }
4824
4825 static struct ftrace_ops control_ops = {
4826 .func = ftrace_ops_control_func,
4827 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4828 INIT_OPS_HASH(control_ops)
4829 };
4830
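/*
* The slow path: walk every registered ftrace_ops and call each one whose
* hash matches the given ip, with recursion protection around the walk.
* This is used when more than one ops is registered or when an ops cannot
* be called directly from the trampoline.
*/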
4831 static inline void
4832 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4833 struct ftrace_ops *ignored, struct pt_regs *regs)
4834 {
4835 struct ftrace_ops *op;
4836 int bit;
4837
4838 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4839 if (bit < 0)
4840 return;
4841
4842 /*
4843 * Some of the ops may be dynamically allocated,
4844 * they must be freed after a synchronize_sched().
4845 */
4846 preempt_disable_notrace();
4847 do_for_each_ftrace_op(op, ftrace_ops_list) {
4848 if (ftrace_ops_test(op, ip, regs)) {
4849 if (FTRACE_WARN_ON(!op->func)) {
4850 pr_warn("op=%p %pS\n", op, op);
4851 goto out;
4852 }
4853 op->func(ip, parent_ip, op, regs);
4854 }
4855 } while_for_each_ftrace_op(op);
4856 out:
4857 preempt_enable_notrace();
4858 trace_clear_recursion(bit);
4859 }
4860
4861 /*
4862 * Some archs only support passing ip and parent_ip. Even though
4863 * the list function ignores the op parameter, we do not want any
4864 * C side effects, where a function is called without the caller
4865 * sending a third parameter.
4866 * Archs are to support both the regs and ftrace_ops at the same time.
4867 * If they support ftrace_ops, it is assumed they support regs.
4868 * If callbacks want to use regs, they must either check for regs
4869 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4870 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4871 * An architecture can pass partial regs with ftrace_ops and still
4872 * set ARCH_SUPPORTS_FTRACE_OPS.
4873 */
4874 #if ARCH_SUPPORTS_FTRACE_OPS
4875 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4876 struct ftrace_ops *op, struct pt_regs *regs)
4877 {
4878 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4879 }
4880 #else
4881 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4882 {
4883 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4884 }
4885 #endif
4886
4887 /*
4888 * If there's only one function registered but it does not support
4889 * recursion, this function will be called by the mcount trampoline.
4890 * This function will handle recursion protection.
4891 */
4892 static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
4893 struct ftrace_ops *op, struct pt_regs *regs)
4894 {
4895 int bit;
4896
4897 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4898 if (bit < 0)
4899 return;
4900
4901 op->func(ip, parent_ip, op, regs);
4902
4903 trace_clear_recursion(bit);
4904 }
4905
4906 /**
4907 * ftrace_ops_get_func - get the function a trampoline should call
4908 * @ops: the ops to get the function for
4909 *
4910 * Normally the mcount trampoline will call the ops->func, but there
4911 * are times that it should not. For example, if the ops does not
4912 * have its own recursion protection, then it should call the
4913 * ftrace_ops_recurs_func() instead.
4914 *
4915 * Returns the function that the trampoline should call for @ops.
4916 */
4917 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
4918 {
4919 /*
4920 * If this is a dynamic ops or we force list func,
4921 * then it needs to call the list anyway.
4922 */
4923 if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
4924 return ftrace_ops_list_func;
4925
4926 /*
4927 * If the func handles its own recursion, call it directly.
4928 * Otherwise call the recursion protected function that
4929 * will call the ftrace ops function.
4930 */
4931 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
4932 return ftrace_ops_recurs_func;
4933
4934 return ops->func;
4935 }
4936
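/*
* Helpers for the set_ftrace_pid file: pid 0 selects the "swapper" (idle)
* tasks, so these routines walk the online CPUs and set or clear the
* per-task trace flag on each idle task, or on every task of a given pid.
*/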
4937 static void clear_ftrace_swapper(void)
4938 {
4939 struct task_struct *p;
4940 int cpu;
4941
4942 get_online_cpus();
4943 for_each_online_cpu(cpu) {
4944 p = idle_task(cpu);
4945 clear_tsk_trace_trace(p);
4946 }
4947 put_online_cpus();
4948 }
4949
4950 static void set_ftrace_swapper(void)
4951 {
4952 struct task_struct *p;
4953 int cpu;
4954
4955 get_online_cpus();
4956 for_each_online_cpu(cpu) {
4957 p = idle_task(cpu);
4958 set_tsk_trace_trace(p);
4959 }
4960 put_online_cpus();
4961 }
4962
4963 static void clear_ftrace_pid(struct pid *pid)
4964 {
4965 struct task_struct *p;
4966
4967 rcu_read_lock();
4968 do_each_pid_task(pid, PIDTYPE_PID, p) {
4969 clear_tsk_trace_trace(p);
4970 } while_each_pid_task(pid, PIDTYPE_PID, p);
4971 rcu_read_unlock();
4972
4973 put_pid(pid);
4974 }
4975
4976 static void set_ftrace_pid(struct pid *pid)
4977 {
4978 struct task_struct *p;
4979
4980 rcu_read_lock();
4981 do_each_pid_task(pid, PIDTYPE_PID, p) {
4982 set_tsk_trace_trace(p);
4983 } while_each_pid_task(pid, PIDTYPE_PID, p);
4984 rcu_read_unlock();
4985 }
4986
4987 static void clear_ftrace_pid_task(struct pid *pid)
4988 {
4989 if (pid == ftrace_swapper_pid)
4990 clear_ftrace_swapper();
4991 else
4992 clear_ftrace_pid(pid);
4993 }
4994
4995 static void set_ftrace_pid_task(struct pid *pid)
4996 {
4997 if (pid == ftrace_swapper_pid)
4998 set_ftrace_swapper();
4999 else
5000 set_ftrace_pid(pid);
5001 }
5002
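/*
* Add a pid to the set of traced pids. A value of 0 selects the special
* ftrace_swapper_pid so that the idle tasks are traced; adding a pid that
* is already on the list is silently ignored.
*/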
5003 static int ftrace_pid_add(int p)
5004 {
5005 struct pid *pid;
5006 struct ftrace_pid *fpid;
5007 int ret = -EINVAL;
5008
5009 mutex_lock(&ftrace_lock);
5010
5011 if (!p)
5012 pid = ftrace_swapper_pid;
5013 else
5014 pid = find_get_pid(p);
5015
5016 if (!pid)
5017 goto out;
5018
5019 ret = 0;
5020
5021 list_for_each_entry(fpid, &ftrace_pids, list)
5022 if (fpid->pid == pid)
5023 goto out_put;
5024
5025 ret = -ENOMEM;
5026
5027 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
5028 if (!fpid)
5029 goto out_put;
5030
5031 list_add(&fpid->list, &ftrace_pids);
5032 fpid->pid = pid;
5033
5034 set_ftrace_pid_task(pid);
5035
5036 ftrace_update_pid_func();
5037
5038 ftrace_startup_all(0);
5039
5040 mutex_unlock(&ftrace_lock);
5041 return 0;
5042
5043 out_put:
5044 if (pid != ftrace_swapper_pid)
5045 put_pid(pid);
5046
5047 out:
5048 mutex_unlock(&ftrace_lock);
5049 return ret;
5050 }
5051
5052 static void ftrace_pid_reset(void)
5053 {
5054 struct ftrace_pid *fpid, *safe;
5055
5056 mutex_lock(&ftrace_lock);
5057 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
5058 struct pid *pid = fpid->pid;
5059
5060 clear_ftrace_pid_task(pid);
5061
5062 list_del(&fpid->list);
5063 kfree(fpid);
5064 }
5065
5066 ftrace_update_pid_func();
5067 ftrace_startup_all(0);
5068
5069 mutex_unlock(&ftrace_lock);
5070 }
5071
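/*
* seq_file callbacks for reading set_ftrace_pid. fpid_start() returning
* (void *)1 means the pid list is empty and fpid_show() prints "no pid".
*/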
5072 static void *fpid_start(struct seq_file *m, loff_t *pos)
5073 {
5074 mutex_lock(&ftrace_lock);
5075
5076 if (list_empty(&ftrace_pids) && (!*pos))
5077 return (void *) 1;
5078
5079 return seq_list_start(&ftrace_pids, *pos);
5080 }
5081
5082 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
5083 {
5084 if (v == (void *)1)
5085 return NULL;
5086
5087 return seq_list_next(v, &ftrace_pids, pos);
5088 }
5089
5090 static void fpid_stop(struct seq_file *m, void *p)
5091 {
5092 mutex_unlock(&ftrace_lock);
5093 }
5094
5095 static int fpid_show(struct seq_file *m, void *v)
5096 {
5097 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
5098
5099 if (v == (void *)1) {
5100 seq_printf(m, "no pid\n");
5101 return 0;
5102 }
5103
5104 if (fpid->pid == ftrace_swapper_pid)
5105 seq_printf(m, "swapper tasks\n");
5106 else
5107 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
5108
5109 return 0;
5110 }
5111
5112 static const struct seq_operations ftrace_pid_sops = {
5113 .start = fpid_start,
5114 .next = fpid_next,
5115 .stop = fpid_stop,
5116 .show = fpid_show,
5117 };
5118
5119 static int
5120 ftrace_pid_open(struct inode *inode, struct file *file)
5121 {
5122 int ret = 0;
5123
5124 if ((file->f_mode & FMODE_WRITE) &&
5125 (file->f_flags & O_TRUNC))
5126 ftrace_pid_reset();
5127
5128 if (file->f_mode & FMODE_READ)
5129 ret = seq_open(file, &ftrace_pid_sops);
5130
5131 return ret;
5132 }
5133
5134 static ssize_t
5135 ftrace_pid_write(struct file *filp, const char __user *ubuf,
5136 size_t cnt, loff_t *ppos)
5137 {
5138 char buf[64], *tmp;
5139 long val;
5140 int ret;
5141
5142 if (cnt >= sizeof(buf))
5143 return -EINVAL;
5144
5145 if (copy_from_user(&buf, ubuf, cnt))
5146 return -EFAULT;
5147
5148 buf[cnt] = 0;
5149
5150 /*
5151 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
5152 * to clean the filter quietly.
5153 */
5154 tmp = strstrip(buf);
5155 if (strlen(tmp) == 0)
5156 return 1;
5157
5158 ret = kstrtol(tmp, 10, &val);
5159 if (ret < 0)
5160 return ret;
5161
5162 ret = ftrace_pid_add(val);
5163
5164 return ret ? ret : cnt;
5165 }
5166
5167 static int
5168 ftrace_pid_release(struct inode *inode, struct file *file)
5169 {
5170 if (file->f_mode & FMODE_READ)
5171 seq_release(inode, file);
5172
5173 return 0;
5174 }
5175
5176 static const struct file_operations ftrace_pid_fops = {
5177 .open = ftrace_pid_open,
5178 .write = ftrace_pid_write,
5179 .read = seq_read,
5180 .llseek = tracing_lseek,
5181 .release = ftrace_pid_release,
5182 };
5183
5184 static __init int ftrace_init_debugfs(void)
5185 {
5186 struct dentry *d_tracer;
5187
5188 d_tracer = tracing_init_dentry();
5189 if (!d_tracer)
5190 return 0;
5191
5192 ftrace_init_dyn_debugfs(d_tracer);
5193
5194 trace_create_file("set_ftrace_pid", 0644, d_tracer,
5195 NULL, &ftrace_pid_fops);
5196
5197 ftrace_profile_debugfs(d_tracer);
5198
5199 return 0;
5200 }
5201 fs_initcall(ftrace_init_debugfs);
5202
5203 /**
5204 * ftrace_kill - kill ftrace
5205 *
5206 * This function should be used by panic code. It stops ftrace
5207 * but in a not so nice way. If you need to simply kill ftrace
5208 * from a non-atomic section, use ftrace_kill.
5209 */
5210 void ftrace_kill(void)
5211 {
5212 ftrace_disabled = 1;
5213 ftrace_enabled = 0;
5214 clear_ftrace_function();
5215 }
5216
5217 /**
5218 * ftrace_is_dead - Test if ftrace is dead or not.
5219 */
5220 int ftrace_is_dead(void)
5221 {
5222 return ftrace_disabled;
5223 }
5224
5225 /**
5226 * register_ftrace_function - register a function for profiling
5227 * @ops: ops structure that holds the function for profiling.
5228 *
5229 * Register a function to be called by all functions in the
5230 * kernel.
5231 *
5232 * Note: @ops->func and all the functions it calls must be labeled
5233 * with "notrace", otherwise it will go into a
5234 * recursive loop.
5235 */
5236 int register_ftrace_function(struct ftrace_ops *ops)
5237 {
5238 int ret = -1;
5239
5240 ftrace_ops_init(ops);
5241
5242 mutex_lock(&ftrace_lock);
5243
5244 ret = ftrace_startup(ops, 0);
5245
5246 mutex_unlock(&ftrace_lock);
5247
5248 return ret;
5249 }
5250 EXPORT_SYMBOL_GPL(register_ftrace_function);
5251
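/*
* Example (an illustrative sketch, not code used by this file): a typical
* caller of register_ftrace_function() looks roughly like the following,
* where my_trace_func and my_ops are hypothetical names:
*
*	static void notrace my_trace_func(unsigned long ip,
*					  unsigned long parent_ip,
*					  struct ftrace_ops *op,
*					  struct pt_regs *regs)
*	{
*		trace_printk("hit %ps\n", (void *)ip);
*	}
*
*	static struct ftrace_ops my_ops = {
*		.func	= my_trace_func,
*		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
*	};
*
*	register_ftrace_function(&my_ops);
*/
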
5252 /**
5253 * unregister_ftrace_function - unregister a function for profiling.
5254 * @ops: ops structure that holds the function to unregister
5255 *
5256 * Unregister a function that was added to be called by ftrace profiling.
5257 */
5258 int unregister_ftrace_function(struct ftrace_ops *ops)
5259 {
5260 int ret;
5261
5262 mutex_lock(&ftrace_lock);
5263 ret = ftrace_shutdown(ops, 0);
5264 mutex_unlock(&ftrace_lock);
5265
5266 return ret;
5267 }
5268 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
5269
5270 int
5271 ftrace_enable_sysctl(struct ctl_table *table, int write,
5272 void __user *buffer, size_t *lenp,
5273 loff_t *ppos)
5274 {
5275 int ret = -ENODEV;
5276
5277 mutex_lock(&ftrace_lock);
5278
5279 if (unlikely(ftrace_disabled))
5280 goto out;
5281
5282 ret = proc_dointvec(table, write, buffer, lenp, ppos);
5283
5284 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
5285 goto out;
5286
5287 last_ftrace_enabled = !!ftrace_enabled;
5288
5289 if (ftrace_enabled) {
5290
5291 ftrace_startup_sysctl();
5292
5293 /* we are starting ftrace again */
5294 if (ftrace_ops_list != &ftrace_list_end)
5295 update_ftrace_function();
5296
5297 } else {
5298 /* stopping ftrace calls (just send to ftrace_stub) */
5299 ftrace_trace_function = ftrace_stub;
5300
5301 ftrace_shutdown_sysctl();
5302 }
5303
5304 out:
5305 mutex_unlock(&ftrace_lock);
5306 return ret;
5307 }
5308
5309 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5310
5311 static struct ftrace_ops graph_ops = {
5312 .func = ftrace_stub,
5313 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5314 FTRACE_OPS_FL_INITIALIZED |
5315 FTRACE_OPS_FL_STUB,
5316 #ifdef FTRACE_GRAPH_TRAMP_ADDR
5317 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
5318 #endif
5319 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5320 };
5321
5322 static int ftrace_graph_active;
5323
5324 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5325 {
5326 return 0;
5327 }
5328
5329 /* The callbacks that hook a function */
5330 trace_func_graph_ret_t ftrace_graph_return =
5331 (trace_func_graph_ret_t)ftrace_stub;
5332 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
5333 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
5334
5335 /* Try to assign a return stack to FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
5336 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5337 {
5338 int i;
5339 int ret = 0;
5340 unsigned long flags;
5341 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5342 struct task_struct *g, *t;
5343
5344 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5345 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5346 * sizeof(struct ftrace_ret_stack),
5347 GFP_KERNEL);
5348 if (!ret_stack_list[i]) {
5349 start = 0;
5350 end = i;
5351 ret = -ENOMEM;
5352 goto free;
5353 }
5354 }
5355
5356 read_lock_irqsave(&tasklist_lock, flags);
5357 do_each_thread(g, t) {
5358 if (start == end) {
5359 ret = -EAGAIN;
5360 goto unlock;
5361 }
5362
5363 if (t->ret_stack == NULL) {
5364 atomic_set(&t->tracing_graph_pause, 0);
5365 atomic_set(&t->trace_overrun, 0);
5366 t->curr_ret_stack = -1;
5367 /* Make sure the tasks see the -1 first: */
5368 smp_wmb();
5369 t->ret_stack = ret_stack_list[start++];
5370 }
5371 } while_each_thread(g, t);
5372
5373 unlock:
5374 read_unlock_irqrestore(&tasklist_lock, flags);
5375 free:
5376 for (i = start; i < end; i++)
5377 kfree(ret_stack_list[i]);
5378 return ret;
5379 }
5380
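/*
* sched_switch tracepoint probe: unless the user asked for sleep time to
* be counted, add the time the incoming task spent sleeping to the
* calltime of every entry on its return stack, so that the reported graph
* times reflect execution time only.
*/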
5381 static void
5382 ftrace_graph_probe_sched_switch(void *ignore,
5383 struct task_struct *prev, struct task_struct *next)
5384 {
5385 unsigned long long timestamp;
5386 int index;
5387
5388 /*
5389 * Does the user want to count the time a function was asleep?
5390 * If so, do not update the time stamps.
5391 */
5392 if (trace_flags & TRACE_ITER_SLEEP_TIME)
5393 return;
5394
5395 timestamp = trace_clock_local();
5396
5397 prev->ftrace_timestamp = timestamp;
5398
5399 /* only process tasks that we timestamped */
5400 if (!next->ftrace_timestamp)
5401 return;
5402
5403 /*
5404 * Update all the counters in next to make up for the
5405 * time next was sleeping.
5406 */
5407 timestamp -= next->ftrace_timestamp;
5408
5409 for (index = next->curr_ret_stack; index >= 0; index--)
5410 next->ret_stack[index].calltime += timestamp;
5411 }
5412
5413 /* Allocate a return stack for each task */
5414 static int start_graph_tracing(void)
5415 {
5416 struct ftrace_ret_stack **ret_stack_list;
5417 int ret, cpu;
5418
5419 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5420 sizeof(struct ftrace_ret_stack *),
5421 GFP_KERNEL);
5422
5423 if (!ret_stack_list)
5424 return -ENOMEM;
5425
5426 /* The cpu_boot init_task->ret_stack will never be freed */
5427 for_each_online_cpu(cpu) {
5428 if (!idle_task(cpu)->ret_stack)
5429 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5430 }
5431
5432 do {
5433 ret = alloc_retstack_tasklist(ret_stack_list);
5434 } while (ret == -EAGAIN);
5435
5436 if (!ret) {
5437 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5438 if (ret)
5439 pr_info("ftrace_graph: Couldn't activate tracepoint"
5440 " probe to kernel_sched_switch\n");
5441 }
5442
5443 kfree(ret_stack_list);
5444 return ret;
5445 }
5446
5447 /*
5448 * Hibernation protection.
5449 * The state of the current task is too unstable during
5450 * suspend/restore to disk. We want to protect against that.
5451 */
5452 static int
5453 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5454 void *unused)
5455 {
5456 switch (state) {
5457 case PM_HIBERNATION_PREPARE:
5458 pause_graph_tracing();
5459 break;
5460
5461 case PM_POST_HIBERNATION:
5462 unpause_graph_tracing();
5463 break;
5464 }
5465 return NOTIFY_DONE;
5466 }
5467
5468 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5469 {
5470 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5471 return 0;
5472 return __ftrace_graph_entry(trace);
5473 }
5474
5475 /*
5476 * The function graph tracer should only trace the functions defined
5477 * by set_ftrace_filter and set_ftrace_notrace. If another function
5478 * tracer ops is registered, the graph tracer requires testing the
5479 * function against the global ops, rather than just tracing any
5480 * function that any ftrace_ops has registered.
5481 */
5482 static void update_function_graph_func(void)
5483 {
5484 struct ftrace_ops *op;
5485 bool do_test = false;
5486
5487 /*
5488 * The graph and global ops share the same set of functions
5489 * to test. If any other ops is on the list, then
5490 * the graph tracing needs to test if it is a function
5491 * it should call.
5492 */
5493 do_for_each_ftrace_op(op, ftrace_ops_list) {
5494 if (op != &global_ops && op != &graph_ops &&
5495 op != &ftrace_list_end) {
5496 do_test = true;
5497 /* in double loop, break out with goto */
5498 goto out;
5499 }
5500 } while_for_each_ftrace_op(op);
5501 out:
5502 if (do_test)
5503 ftrace_graph_entry = ftrace_graph_entry_test;
5504 else
5505 ftrace_graph_entry = __ftrace_graph_entry;
5506 }
5507
5508 static struct notifier_block ftrace_suspend_notifier = {
5509 .notifier_call = ftrace_suspend_notifier_call,
5510 };
5511
5512 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5513 trace_func_graph_ent_t entryfunc)
5514 {
5515 int ret = 0;
5516
5517 mutex_lock(&ftrace_lock);
5518
5519 /* we currently allow only one tracer registered at a time */
5520 if (ftrace_graph_active) {
5521 ret = -EBUSY;
5522 goto out;
5523 }
5524
5525 register_pm_notifier(&ftrace_suspend_notifier);
5526
5527 ftrace_graph_active++;
5528 ret = start_graph_tracing();
5529 if (ret) {
5530 ftrace_graph_active--;
5531 goto out;
5532 }
5533
5534 ftrace_graph_return = retfunc;
5535
5536 /*
5537 * Point the indirect __ftrace_graph_entry at entryfunc, and set the
5538 * function that actually gets called to the entry_test first. Then
5539 * call update_function_graph_func() to determine whether
5540 * the entryfunc should be called directly or not.
5541 */
5542 __ftrace_graph_entry = entryfunc;
5543 ftrace_graph_entry = ftrace_graph_entry_test;
5544 update_function_graph_func();
5545
5546 ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
5547 out:
5548 mutex_unlock(&ftrace_lock);
5549 return ret;
5550 }
5551
5552 void unregister_ftrace_graph(void)
5553 {
5554 mutex_lock(&ftrace_lock);
5555
5556 if (unlikely(!ftrace_graph_active))
5557 goto out;
5558
5559 ftrace_graph_active--;
5560 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5561 ftrace_graph_entry = ftrace_graph_entry_stub;
5562 __ftrace_graph_entry = ftrace_graph_entry_stub;
5563 ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
5564 unregister_pm_notifier(&ftrace_suspend_notifier);
5565 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5566
5567 #ifdef CONFIG_DYNAMIC_FTRACE
5568 /*
5569 * Function graph does not allocate the trampoline, but
5570 * other global_ops do. We need to reset the ALLOC_TRAMP flag
5571 * if one was used.
5572 */
5573 global_ops.trampoline = save_global_trampoline;
5574 if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
5575 global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
5576 #endif
5577
5578 out:
5579 mutex_unlock(&ftrace_lock);
5580 }
5581
5582 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5583
5584 static void
5585 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5586 {
5587 atomic_set(&t->tracing_graph_pause, 0);
5588 atomic_set(&t->trace_overrun, 0);
5589 t->ftrace_timestamp = 0;
5590 /* make curr_ret_stack visible before we add the ret_stack */
5591 smp_wmb();
5592 t->ret_stack = ret_stack;
5593 }
5594
5595 /*
5596 * Allocate a return stack for the idle task. May be the first
5597 * time through, or it may be done by CPU hotplug online.
5598 */
5599 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5600 {
5601 t->curr_ret_stack = -1;
5602 /*
5603 * The idle task has no parent, it either has its own
5604 * stack or no stack at all.
5605 */
5606 if (t->ret_stack)
5607 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5608
5609 if (ftrace_graph_active) {
5610 struct ftrace_ret_stack *ret_stack;
5611
5612 ret_stack = per_cpu(idle_ret_stack, cpu);
5613 if (!ret_stack) {
5614 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5615 * sizeof(struct ftrace_ret_stack),
5616 GFP_KERNEL);
5617 if (!ret_stack)
5618 return;
5619 per_cpu(idle_ret_stack, cpu) = ret_stack;
5620 }
5621 graph_init_task(t, ret_stack);
5622 }
5623 }
5624
5625 /* Allocate a return stack for newly created task */
5626 void ftrace_graph_init_task(struct task_struct *t)
5627 {
5628 /* Make sure we do not use the parent ret_stack */
5629 t->ret_stack = NULL;
5630 t->curr_ret_stack = -1;
5631
5632 if (ftrace_graph_active) {
5633 struct ftrace_ret_stack *ret_stack;
5634
5635 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5636 * sizeof(struct ftrace_ret_stack),
5637 GFP_KERNEL);
5638 if (!ret_stack)
5639 return;
5640 graph_init_task(t, ret_stack);
5641 }
5642 }
5643
5644 void ftrace_graph_exit_task(struct task_struct *t)
5645 {
5646 struct ftrace_ret_stack *ret_stack = t->ret_stack;
5647
5648 t->ret_stack = NULL;
5649 /* NULL must become visible to IRQs before we free it: */
5650 barrier();
5651
5652 kfree(ret_stack);
5653 }
5654 #endif