ftrace: Cleanup of global variables ftrace_new_pgs and ftrace_update_cnt
kernel/trace/ftrace.c (deliverable/linux.git)
1 /*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
14 */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35
36 #include <trace/events/sched.h>
37
38 #include <asm/setup.h>
39
40 #include "trace_output.h"
41 #include "trace_stat.h"
42
43 #define FTRACE_WARN_ON(cond) \
44 ({ \
45 int ___r = cond; \
46 if (WARN_ON(___r)) \
47 ftrace_kill(); \
48 ___r; \
49 })
50
51 #define FTRACE_WARN_ON_ONCE(cond) \
52 ({ \
53 int ___r = cond; \
54 if (WARN_ON_ONCE(___r)) \
55 ftrace_kill(); \
56 ___r; \
57 })
58
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
64
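/*
 * For reference (simple arithmetic on the defines above): the function
 * probe hash has 1 << 7 = 128 buckets, a freshly allocated filter hash
 * defaults to 1 << 10 = 1024 buckets, and a hash is never grown past
 * 1 << 12 = 4096 buckets.
 */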
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
66
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_REGEX_LOCK(opsname) \
69 .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
70 #else
71 #define INIT_REGEX_LOCK(opsname)
72 #endif
73
74 static struct ftrace_ops ftrace_list_end __read_mostly = {
75 .func = ftrace_stub,
76 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
77 };
78
79 /* ftrace_enabled is a method to turn ftrace on or off */
80 int ftrace_enabled __read_mostly;
81 static int last_ftrace_enabled;
82
83 /* Quick disabling of function tracer. */
84 int function_trace_stop __read_mostly;
85
86 /* Current function tracing op */
87 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
88 /* What to set function_trace_op to */
89 static struct ftrace_ops *set_function_trace_op;
90
91 /* List for set_ftrace_pid's pids. */
92 LIST_HEAD(ftrace_pids);
93 struct ftrace_pid {
94 struct list_head list;
95 struct pid *pid;
96 };
97
98 /*
99 * ftrace_disabled is set when an anomaly is discovered.
100 * ftrace_disabled is much stronger than ftrace_enabled.
101 */
102 static int ftrace_disabled __read_mostly;
103
104 static DEFINE_MUTEX(ftrace_lock);
105
106 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
107 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
108 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
109 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
110 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
111 static struct ftrace_ops global_ops;
112 static struct ftrace_ops control_ops;
113
114 #if ARCH_SUPPORTS_FTRACE_OPS
115 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
116 struct ftrace_ops *op, struct pt_regs *regs);
117 #else
118 /* See comment below, where ftrace_ops_list_func is defined */
119 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
120 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
121 #endif
122
123 /*
124 * Traverse the ftrace_global_list, invoking all entries. The reason that we
125 * can use rcu_dereference_raw_notrace() is that elements removed from this list
126 * are simply leaked, so there is no need to interact with a grace-period
127 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
128 * concurrent insertions into the ftrace_global_list.
129 *
130 * Silly Alpha and silly pointer-speculation compiler optimizations!
131 */
132 #define do_for_each_ftrace_op(op, list) \
133 op = rcu_dereference_raw_notrace(list); \
134 do
135
136 /*
137 * Optimized for just a single item in the list (as that is the normal case).
138 */
139 #define while_for_each_ftrace_op(op) \
140 while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
141 unlikely((op) != &ftrace_list_end))
142
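/*
 * A minimal usage sketch of the pair above (this mirrors how
 * ftrace_global_list_func() below walks its list):
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 *
 * The body runs for each registered ops and the walk stops once it
 * reaches the ftrace_list_end stub.
 */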
143 static inline void ftrace_ops_init(struct ftrace_ops *ops)
144 {
145 #ifdef CONFIG_DYNAMIC_FTRACE
146 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
147 mutex_init(&ops->regex_lock);
148 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
149 }
150 #endif
151 }
152
153 /**
154 * ftrace_nr_registered_ops - return number of ops registered
155 *
156 * Returns the number of ftrace_ops registered and tracing functions
157 */
158 int ftrace_nr_registered_ops(void)
159 {
160 struct ftrace_ops *ops;
161 int cnt = 0;
162
163 mutex_lock(&ftrace_lock);
164
165 for (ops = ftrace_ops_list;
166 ops != &ftrace_list_end; ops = ops->next)
167 cnt++;
168
169 mutex_unlock(&ftrace_lock);
170
171 return cnt;
172 }
173
174 static void
175 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
176 struct ftrace_ops *op, struct pt_regs *regs)
177 {
178 int bit;
179
180 bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
181 if (bit < 0)
182 return;
183
184 do_for_each_ftrace_op(op, ftrace_global_list) {
185 op->func(ip, parent_ip, op, regs);
186 } while_for_each_ftrace_op(op);
187
188 trace_clear_recursion(bit);
189 }
190
191 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
192 struct ftrace_ops *op, struct pt_regs *regs)
193 {
194 if (!test_tsk_trace_trace(current))
195 return;
196
197 ftrace_pid_function(ip, parent_ip, op, regs);
198 }
199
200 static void set_ftrace_pid_function(ftrace_func_t func)
201 {
202 /* do not set ftrace_pid_function to itself! */
203 if (func != ftrace_pid_func)
204 ftrace_pid_function = func;
205 }
206
207 /**
208 * clear_ftrace_function - reset the ftrace function
209 *
210 * This NULLs the ftrace function and in essence stops
211 * tracing. There may be some lag before every CPU sees it.
212 */
213 void clear_ftrace_function(void)
214 {
215 ftrace_trace_function = ftrace_stub;
216 ftrace_pid_function = ftrace_stub;
217 }
218
219 static void control_ops_disable_all(struct ftrace_ops *ops)
220 {
221 int cpu;
222
223 for_each_possible_cpu(cpu)
224 *per_cpu_ptr(ops->disabled, cpu) = 1;
225 }
226
227 static int control_ops_alloc(struct ftrace_ops *ops)
228 {
229 int __percpu *disabled;
230
231 disabled = alloc_percpu(int);
232 if (!disabled)
233 return -ENOMEM;
234
235 ops->disabled = disabled;
236 control_ops_disable_all(ops);
237 return 0;
238 }
239
240 static void control_ops_free(struct ftrace_ops *ops)
241 {
242 free_percpu(ops->disabled);
243 }
244
245 static void update_global_ops(void)
246 {
247 ftrace_func_t func = ftrace_global_list_func;
248 void *private = NULL;
249
250 /* The list has its own recursion protection. */
251 global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
252
253 /*
254 * If there's only one function registered, then call that
255 * function directly. Otherwise, we need to iterate over the
256 * registered callers.
257 */
258 if (ftrace_global_list == &ftrace_list_end ||
259 ftrace_global_list->next == &ftrace_list_end) {
260 func = ftrace_global_list->func;
261 private = ftrace_global_list->private;
262 /*
263 * Since we are calling the function directly,
264 * if it does not have recursion protection,
265 * the function_trace_op needs to be updated
266 * accordingly.
267 */
268 if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
269 global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
270 }
271
272 /* If we filter on pids, update to use the pid function */
273 if (!list_empty(&ftrace_pids)) {
274 set_ftrace_pid_function(func);
275 func = ftrace_pid_func;
276 }
277
278 global_ops.func = func;
279 global_ops.private = private;
280 }
281
282 static void ftrace_sync(struct work_struct *work)
283 {
284 /*
285 * This function is just a stub to implement a hard force
286 * of synchronize_sched(). This requires synchronizing
287 * tasks even in userspace and idle.
288 *
289 * Yes, function tracing is rude.
290 */
291 }
292
293 static void ftrace_sync_ipi(void *data)
294 {
295 /* Probably not needed, but do it anyway */
296 smp_rmb();
297 }
298
299 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
300 static void update_function_graph_func(void);
301 #else
302 static inline void update_function_graph_func(void) { }
303 #endif
304
305 static void update_ftrace_function(void)
306 {
307 ftrace_func_t func;
308
309 update_global_ops();
310
311 /*
312 * If we are at the end of the list and this ops is
313 * recursion safe and not dynamic and the arch supports passing ops,
314 * then have the mcount trampoline call the function directly.
315 */
316 if (ftrace_ops_list == &ftrace_list_end ||
317 (ftrace_ops_list->next == &ftrace_list_end &&
318 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
319 (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
320 !FTRACE_FORCE_LIST_FUNC)) {
321 /* Set the ftrace_ops that the arch callback uses */
322 if (ftrace_ops_list == &global_ops)
323 set_function_trace_op = ftrace_global_list;
324 else
325 set_function_trace_op = ftrace_ops_list;
326 func = ftrace_ops_list->func;
327 } else {
328 /* Just use the default ftrace_ops */
329 set_function_trace_op = &ftrace_list_end;
330 func = ftrace_ops_list_func;
331 }
332
333 /* If there's no change, then do nothing more here */
334 if (ftrace_trace_function == func)
335 return;
336
337 update_function_graph_func();
338
339 /*
340 * If we are using the list function, it doesn't care
341 * about the function_trace_ops.
342 */
343 if (func == ftrace_ops_list_func) {
344 ftrace_trace_function = func;
345 /*
346 * Don't even bother setting function_trace_ops,
347 * it would be racy to do so anyway.
348 */
349 return;
350 }
351
352 #ifndef CONFIG_DYNAMIC_FTRACE
353 /*
354 * For static tracing, we need to be a bit more careful.
355 * The function change takes effect immediately. Thus,
356 * we need to coordinate the setting of the function_trace_op
357 * with the setting of the ftrace_trace_function.
358 *
359 * Set the function to the list ops, which will call the
360 * function we want, albeit indirectly, but it handles the
361 * ftrace_ops and doesn't depend on function_trace_op.
362 */
363 ftrace_trace_function = ftrace_ops_list_func;
364 /*
365 * Make sure all CPUs see this. Yes this is slow, but static
366 * tracing is slow and nasty to have enabled.
367 */
368 schedule_on_each_cpu(ftrace_sync);
369 /* Now all cpus are using the list ops. */
370 function_trace_op = set_function_trace_op;
371 /* Make sure the function_trace_op is visible on all CPUs */
372 smp_wmb();
373 /* Nasty way to force a rmb on all cpus */
374 smp_call_function(ftrace_sync_ipi, NULL, 1);
375 /* OK, we are all set to update the ftrace_trace_function now! */
376 #endif /* !CONFIG_DYNAMIC_FTRACE */
377
378 ftrace_trace_function = func;
379 }
380
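/*
 * Summary of the !CONFIG_DYNAMIC_FTRACE ordering in
 * update_ftrace_function() above:
 *   1. point ftrace_trace_function at the list func (which does not
 *      depend on function_trace_op),
 *   2. schedule_on_each_cpu(ftrace_sync) so every CPU has switched,
 *   3. update function_trace_op and publish it with smp_wmb(),
 *   4. force an smp_rmb() everywhere via the ftrace_sync_ipi IPI,
 *   5. only then install the final function.
 */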
381 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
382 {
383 ops->next = *list;
384 /*
385 * We are entering ops into the list but another
386 * CPU might be walking that list. We need to make sure
387 * the ops->next pointer is valid before another CPU sees
388 * the ops pointer included into the list.
389 */
390 rcu_assign_pointer(*list, ops);
391 }
392
393 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
394 {
395 struct ftrace_ops **p;
396
397 /*
398 * If we are removing the last function, then simply point
399 * to the ftrace_stub.
400 */
401 if (*list == ops && ops->next == &ftrace_list_end) {
402 *list = &ftrace_list_end;
403 return 0;
404 }
405
406 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
407 if (*p == ops)
408 break;
409
410 if (*p != ops)
411 return -1;
412
413 *p = (*p)->next;
414 return 0;
415 }
416
417 static void add_ftrace_list_ops(struct ftrace_ops **list,
418 struct ftrace_ops *main_ops,
419 struct ftrace_ops *ops)
420 {
421 int first = *list == &ftrace_list_end;
422 add_ftrace_ops(list, ops);
423 if (first)
424 add_ftrace_ops(&ftrace_ops_list, main_ops);
425 }
426
427 static int remove_ftrace_list_ops(struct ftrace_ops **list,
428 struct ftrace_ops *main_ops,
429 struct ftrace_ops *ops)
430 {
431 int ret = remove_ftrace_ops(list, ops);
432 if (!ret && *list == &ftrace_list_end)
433 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
434 return ret;
435 }
436
437 static int __register_ftrace_function(struct ftrace_ops *ops)
438 {
439 if (ops->flags & FTRACE_OPS_FL_DELETED)
440 return -EINVAL;
441
442 if (FTRACE_WARN_ON(ops == &global_ops))
443 return -EINVAL;
444
445 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
446 return -EBUSY;
447
448 /* We don't support both control and global flags set. */
449 if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
450 return -EINVAL;
451
452 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
453 /*
454 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
455 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
456 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
457 */
458 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
459 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
460 return -EINVAL;
461
462 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
463 ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
464 #endif
465
466 if (!core_kernel_data((unsigned long)ops))
467 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
468
469 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
470 add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
471 ops->flags |= FTRACE_OPS_FL_ENABLED;
472 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
473 if (control_ops_alloc(ops))
474 return -ENOMEM;
475 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
476 } else
477 add_ftrace_ops(&ftrace_ops_list, ops);
478
479 if (ftrace_enabled)
480 update_ftrace_function();
481
482 return 0;
483 }
484
485 static int __unregister_ftrace_function(struct ftrace_ops *ops)
486 {
487 int ret;
488
489 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
490 return -EBUSY;
491
492 if (FTRACE_WARN_ON(ops == &global_ops))
493 return -EINVAL;
494
495 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
496 ret = remove_ftrace_list_ops(&ftrace_global_list,
497 &global_ops, ops);
498 if (!ret)
499 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
500 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
501 ret = remove_ftrace_list_ops(&ftrace_control_list,
502 &control_ops, ops);
503 } else
504 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
505
506 if (ret < 0)
507 return ret;
508
509 if (ftrace_enabled)
510 update_ftrace_function();
511
512 return 0;
513 }
514
515 static void ftrace_update_pid_func(void)
516 {
517 /* Only do something if we are tracing something */
518 if (ftrace_trace_function == ftrace_stub)
519 return;
520
521 update_ftrace_function();
522 }
523
524 #ifdef CONFIG_FUNCTION_PROFILER
525 struct ftrace_profile {
526 struct hlist_node node;
527 unsigned long ip;
528 unsigned long counter;
529 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
530 unsigned long long time;
531 unsigned long long time_squared;
532 #endif
533 };
534
535 struct ftrace_profile_page {
536 struct ftrace_profile_page *next;
537 unsigned long index;
538 struct ftrace_profile records[];
539 };
540
541 struct ftrace_profile_stat {
542 atomic_t disabled;
543 struct hlist_head *hash;
544 struct ftrace_profile_page *pages;
545 struct ftrace_profile_page *start;
546 struct tracer_stat stat;
547 };
548
549 #define PROFILE_RECORDS_SIZE \
550 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
551
552 #define PROFILES_PER_PAGE \
553 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
554
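/*
 * Rough arithmetic (an estimate, assuming a 64-bit kernel with 4K
 * pages): struct ftrace_profile is ~48 bytes with the graph tracer
 * (hlist_node + ip + counter + two 64-bit times), so one page holds
 * on the order of 80 records; without the graph tracer it is ~32
 * bytes and a page holds roughly 125 records.
 */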
555 static int ftrace_profile_enabled __read_mostly;
556
557 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
558 static DEFINE_MUTEX(ftrace_profile_lock);
559
560 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
561
562 #define FTRACE_PROFILE_HASH_BITS 10
563 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
564
565 static void *
566 function_stat_next(void *v, int idx)
567 {
568 struct ftrace_profile *rec = v;
569 struct ftrace_profile_page *pg;
570
571 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
572
573 again:
574 if (idx != 0)
575 rec++;
576
577 if ((void *)rec >= (void *)&pg->records[pg->index]) {
578 pg = pg->next;
579 if (!pg)
580 return NULL;
581 rec = &pg->records[0];
582 if (!rec->counter)
583 goto again;
584 }
585
586 return rec;
587 }
588
589 static void *function_stat_start(struct tracer_stat *trace)
590 {
591 struct ftrace_profile_stat *stat =
592 container_of(trace, struct ftrace_profile_stat, stat);
593
594 if (!stat || !stat->start)
595 return NULL;
596
597 return function_stat_next(&stat->start->records[0], 0);
598 }
599
600 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
601 /* function graph compares on total time */
602 static int function_stat_cmp(void *p1, void *p2)
603 {
604 struct ftrace_profile *a = p1;
605 struct ftrace_profile *b = p2;
606
607 if (a->time < b->time)
608 return -1;
609 if (a->time > b->time)
610 return 1;
611 else
612 return 0;
613 }
614 #else
615 /* not function graph compares against hits */
616 static int function_stat_cmp(void *p1, void *p2)
617 {
618 struct ftrace_profile *a = p1;
619 struct ftrace_profile *b = p2;
620
621 if (a->counter < b->counter)
622 return -1;
623 if (a->counter > b->counter)
624 return 1;
625 else
626 return 0;
627 }
628 #endif
629
630 static int function_stat_headers(struct seq_file *m)
631 {
632 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
633 seq_printf(m, " Function "
634 "Hit Time Avg s^2\n"
635 " -------- "
636 "--- ---- --- ---\n");
637 #else
638 seq_printf(m, " Function Hit\n"
639 " -------- ---\n");
640 #endif
641 return 0;
642 }
643
644 static int function_stat_show(struct seq_file *m, void *v)
645 {
646 struct ftrace_profile *rec = v;
647 char str[KSYM_SYMBOL_LEN];
648 int ret = 0;
649 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
650 static struct trace_seq s;
651 unsigned long long avg;
652 unsigned long long stddev;
653 #endif
654 mutex_lock(&ftrace_profile_lock);
655
656 /* we raced with function_profile_reset() */
657 if (unlikely(rec->counter == 0)) {
658 ret = -EBUSY;
659 goto out;
660 }
661
662 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
663 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
664
665 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
666 seq_printf(m, " ");
667 avg = rec->time;
668 do_div(avg, rec->counter);
669
670 /* Sample standard deviation (s^2) */
671 if (rec->counter <= 1)
672 stddev = 0;
673 else {
674 /*
675 * Apply Welford's method:
676 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
677 */
678 stddev = rec->counter * rec->time_squared -
679 rec->time * rec->time;
680
681 /*
682 * Divide only by 1000 for ns^2 -> us^2 conversion.
683 * trace_print_graph_duration will divide 1000 again.
684 */
685 do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
686 }
687
688 trace_seq_init(&s);
689 trace_print_graph_duration(rec->time, &s);
690 trace_seq_puts(&s, " ");
691 trace_print_graph_duration(avg, &s);
692 trace_seq_puts(&s, " ");
693 trace_print_graph_duration(stddev, &s);
694 trace_print_seq(m, &s);
695 #endif
696 seq_putc(m, '\n');
697 out:
698 mutex_unlock(&ftrace_profile_lock);
699
700 return ret;
701 }
702
703 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
704 {
705 struct ftrace_profile_page *pg;
706
707 pg = stat->pages = stat->start;
708
709 while (pg) {
710 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
711 pg->index = 0;
712 pg = pg->next;
713 }
714
715 memset(stat->hash, 0,
716 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
717 }
718
719 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
720 {
721 struct ftrace_profile_page *pg;
722 int functions;
723 int pages;
724 int i;
725
726 /* If we already allocated, do nothing */
727 if (stat->pages)
728 return 0;
729
730 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
731 if (!stat->pages)
732 return -ENOMEM;
733
734 #ifdef CONFIG_DYNAMIC_FTRACE
735 functions = ftrace_update_tot_cnt;
736 #else
737 /*
738 * We do not know the number of functions that exist because
739 * dynamic tracing is what counts them. From past experience
740 * we have around 20K functions. That should be more than enough.
741 * It is highly unlikely we will execute every function in
742 * the kernel.
743 */
744 functions = 20000;
745 #endif
746
747 pg = stat->start = stat->pages;
748
749 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
750
751 for (i = 1; i < pages; i++) {
752 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
753 if (!pg->next)
754 goto out_free;
755 pg = pg->next;
756 }
757
758 return 0;
759
760 out_free:
761 pg = stat->start;
762 while (pg) {
763 unsigned long tmp = (unsigned long)pg;
764
765 pg = pg->next;
766 free_page(tmp);
767 }
768
769 stat->pages = NULL;
770 stat->start = NULL;
771
772 return -ENOMEM;
773 }
774
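/*
 * A rough sizing note (estimate only): with the 20000-function fallback
 * above and on the order of a hundred records per page, the profiler
 * preallocates a couple of hundred pages, i.e. somewhere under 1 MB,
 * for each possible CPU.
 */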
775 static int ftrace_profile_init_cpu(int cpu)
776 {
777 struct ftrace_profile_stat *stat;
778 int size;
779
780 stat = &per_cpu(ftrace_profile_stats, cpu);
781
782 if (stat->hash) {
783 /* If the profile is already created, simply reset it */
784 ftrace_profile_reset(stat);
785 return 0;
786 }
787
788 /*
789 * We are profiling all functions, but usually only a few thousand
790 * functions are hit. We'll make a hash of 1024 items.
791 */
792 size = FTRACE_PROFILE_HASH_SIZE;
793
794 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
795
796 if (!stat->hash)
797 return -ENOMEM;
798
799 /* Preallocate the function profiling pages */
800 if (ftrace_profile_pages_init(stat) < 0) {
801 kfree(stat->hash);
802 stat->hash = NULL;
803 return -ENOMEM;
804 }
805
806 return 0;
807 }
808
809 static int ftrace_profile_init(void)
810 {
811 int cpu;
812 int ret = 0;
813
814 for_each_possible_cpu(cpu) {
815 ret = ftrace_profile_init_cpu(cpu);
816 if (ret)
817 break;
818 }
819
820 return ret;
821 }
822
823 /* interrupts must be disabled */
824 static struct ftrace_profile *
825 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
826 {
827 struct ftrace_profile *rec;
828 struct hlist_head *hhd;
829 unsigned long key;
830
831 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
832 hhd = &stat->hash[key];
833
834 if (hlist_empty(hhd))
835 return NULL;
836
837 hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
838 if (rec->ip == ip)
839 return rec;
840 }
841
842 return NULL;
843 }
844
845 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
846 struct ftrace_profile *rec)
847 {
848 unsigned long key;
849
850 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
851 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
852 }
853
854 /*
855 * The memory is already allocated; this simply finds a new record to use.
856 */
857 static struct ftrace_profile *
858 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
859 {
860 struct ftrace_profile *rec = NULL;
861
862 /* prevent recursion (from NMIs) */
863 if (atomic_inc_return(&stat->disabled) != 1)
864 goto out;
865
866 /*
867 * Try to find the function again since an NMI
868 * could have added it
869 */
870 rec = ftrace_find_profiled_func(stat, ip);
871 if (rec)
872 goto out;
873
874 if (stat->pages->index == PROFILES_PER_PAGE) {
875 if (!stat->pages->next)
876 goto out;
877 stat->pages = stat->pages->next;
878 }
879
880 rec = &stat->pages->records[stat->pages->index++];
881 rec->ip = ip;
882 ftrace_add_profile(stat, rec);
883
884 out:
885 atomic_dec(&stat->disabled);
886
887 return rec;
888 }
889
890 static void
891 function_profile_call(unsigned long ip, unsigned long parent_ip,
892 struct ftrace_ops *ops, struct pt_regs *regs)
893 {
894 struct ftrace_profile_stat *stat;
895 struct ftrace_profile *rec;
896 unsigned long flags;
897
898 if (!ftrace_profile_enabled)
899 return;
900
901 local_irq_save(flags);
902
903 stat = &__get_cpu_var(ftrace_profile_stats);
904 if (!stat->hash || !ftrace_profile_enabled)
905 goto out;
906
907 rec = ftrace_find_profiled_func(stat, ip);
908 if (!rec) {
909 rec = ftrace_profile_alloc(stat, ip);
910 if (!rec)
911 goto out;
912 }
913
914 rec->counter++;
915 out:
916 local_irq_restore(flags);
917 }
918
919 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
920 static int profile_graph_entry(struct ftrace_graph_ent *trace)
921 {
922 function_profile_call(trace->func, 0, NULL, NULL);
923 return 1;
924 }
925
926 static void profile_graph_return(struct ftrace_graph_ret *trace)
927 {
928 struct ftrace_profile_stat *stat;
929 unsigned long long calltime;
930 struct ftrace_profile *rec;
931 unsigned long flags;
932
933 local_irq_save(flags);
934 stat = &__get_cpu_var(ftrace_profile_stats);
935 if (!stat->hash || !ftrace_profile_enabled)
936 goto out;
937
938 /* If the calltime was zero'd ignore it */
939 if (!trace->calltime)
940 goto out;
941
942 calltime = trace->rettime - trace->calltime;
943
944 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
945 int index;
946
947 index = trace->depth;
948
949 /* Append this call time to the parent time to subtract */
950 if (index)
951 current->ret_stack[index - 1].subtime += calltime;
952
953 if (current->ret_stack[index].subtime < calltime)
954 calltime -= current->ret_stack[index].subtime;
955 else
956 calltime = 0;
957 }
958
959 rec = ftrace_find_profiled_func(stat, trace->func);
960 if (rec) {
961 rec->time += calltime;
962 rec->time_squared += calltime * calltime;
963 }
964
965 out:
966 local_irq_restore(flags);
967 }
968
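/*
 * Example of the subtime handling above when GRAPH_TIME is off: if
 * foo() runs for 10us total and calls bar() which runs for 4us, then
 * bar()'s return adds 4us to foo()'s frame subtime, so foo() is later
 * recorded with 10us - 4us = 6us of "own" time.
 */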
969 static int register_ftrace_profiler(void)
970 {
971 return register_ftrace_graph(&profile_graph_return,
972 &profile_graph_entry);
973 }
974
975 static void unregister_ftrace_profiler(void)
976 {
977 unregister_ftrace_graph();
978 }
979 #else
980 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
981 .func = function_profile_call,
982 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
983 INIT_REGEX_LOCK(ftrace_profile_ops)
984 };
985
986 static int register_ftrace_profiler(void)
987 {
988 return register_ftrace_function(&ftrace_profile_ops);
989 }
990
991 static void unregister_ftrace_profiler(void)
992 {
993 unregister_ftrace_function(&ftrace_profile_ops);
994 }
995 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
996
997 static ssize_t
998 ftrace_profile_write(struct file *filp, const char __user *ubuf,
999 size_t cnt, loff_t *ppos)
1000 {
1001 unsigned long val;
1002 int ret;
1003
1004 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1005 if (ret)
1006 return ret;
1007
1008 val = !!val;
1009
1010 mutex_lock(&ftrace_profile_lock);
1011 if (ftrace_profile_enabled ^ val) {
1012 if (val) {
1013 ret = ftrace_profile_init();
1014 if (ret < 0) {
1015 cnt = ret;
1016 goto out;
1017 }
1018
1019 ret = register_ftrace_profiler();
1020 if (ret < 0) {
1021 cnt = ret;
1022 goto out;
1023 }
1024 ftrace_profile_enabled = 1;
1025 } else {
1026 ftrace_profile_enabled = 0;
1027 /*
1028 * unregister_ftrace_profiler calls stop_machine
1029 * so this acts like a synchronize_sched.
1030 */
1031 unregister_ftrace_profiler();
1032 }
1033 }
1034 out:
1035 mutex_unlock(&ftrace_profile_lock);
1036
1037 *ppos += cnt;
1038
1039 return cnt;
1040 }
1041
1042 static ssize_t
1043 ftrace_profile_read(struct file *filp, char __user *ubuf,
1044 size_t cnt, loff_t *ppos)
1045 {
1046 char buf[64]; /* big enough to hold a number */
1047 int r;
1048
1049 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
1050 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1051 }
1052
1053 static const struct file_operations ftrace_profile_fops = {
1054 .open = tracing_open_generic,
1055 .read = ftrace_profile_read,
1056 .write = ftrace_profile_write,
1057 .llseek = default_llseek,
1058 };
1059
1060 /* used to initialize the real stat files */
1061 static struct tracer_stat function_stats __initdata = {
1062 .name = "functions",
1063 .stat_start = function_stat_start,
1064 .stat_next = function_stat_next,
1065 .stat_cmp = function_stat_cmp,
1066 .stat_headers = function_stat_headers,
1067 .stat_show = function_stat_show
1068 };
1069
1070 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1071 {
1072 struct ftrace_profile_stat *stat;
1073 struct dentry *entry;
1074 char *name;
1075 int ret;
1076 int cpu;
1077
1078 for_each_possible_cpu(cpu) {
1079 stat = &per_cpu(ftrace_profile_stats, cpu);
1080
1081 /* allocate enough for function name + cpu number */
1082 name = kmalloc(32, GFP_KERNEL);
1083 if (!name) {
1084 /*
1085 * The files created are permanent; if something goes wrong
1086 * we still do not free the memory.
1087 */
1088 WARN(1,
1089 "Could not allocate stat file for cpu %d\n",
1090 cpu);
1091 return;
1092 }
1093 stat->stat = function_stats;
1094 snprintf(name, 32, "function%d", cpu);
1095 stat->stat.name = name;
1096 ret = register_stat_tracer(&stat->stat);
1097 if (ret) {
1098 WARN(1,
1099 "Could not register function stat for cpu %d\n",
1100 cpu);
1101 kfree(name);
1102 return;
1103 }
1104 }
1105
1106 entry = debugfs_create_file("function_profile_enabled", 0644,
1107 d_tracer, NULL, &ftrace_profile_fops);
1108 if (!entry)
1109 pr_warning("Could not create debugfs "
1110 "'function_profile_enabled' entry\n");
1111 }
1112
1113 #else /* CONFIG_FUNCTION_PROFILER */
1114 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1115 {
1116 }
1117 #endif /* CONFIG_FUNCTION_PROFILER */
1118
1119 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1120
1121 #ifdef CONFIG_DYNAMIC_FTRACE
1122
1123 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1124 # error Dynamic ftrace depends on MCOUNT_RECORD
1125 #endif
1126
1127 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1128
1129 struct ftrace_func_probe {
1130 struct hlist_node node;
1131 struct ftrace_probe_ops *ops;
1132 unsigned long flags;
1133 unsigned long ip;
1134 void *data;
1135 struct list_head free_list;
1136 };
1137
1138 struct ftrace_func_entry {
1139 struct hlist_node hlist;
1140 unsigned long ip;
1141 };
1142
1143 struct ftrace_hash {
1144 unsigned long size_bits;
1145 struct hlist_head *buckets;
1146 unsigned long count;
1147 struct rcu_head rcu;
1148 };
1149
1150 /*
1151 * We make these constant because no one should touch them,
1152 * but they are used as the default "empty hash", to avoid allocating
1153 * it all the time. These are in a read only section such that if
1154 * anyone does try to modify it, it will cause an exception.
1155 */
1156 static const struct hlist_head empty_buckets[1];
1157 static const struct ftrace_hash empty_hash = {
1158 .buckets = (struct hlist_head *)empty_buckets,
1159 };
1160 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1161
1162 static struct ftrace_ops global_ops = {
1163 .func = ftrace_stub,
1164 .notrace_hash = EMPTY_HASH,
1165 .filter_hash = EMPTY_HASH,
1166 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
1167 INIT_REGEX_LOCK(global_ops)
1168 };
1169
1170 struct ftrace_page {
1171 struct ftrace_page *next;
1172 struct dyn_ftrace *records;
1173 int index;
1174 int size;
1175 };
1176
1177 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1178 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1179
1180 /* estimate from running different kernels */
1181 #define NR_TO_INIT 10000
1182
1183 static struct ftrace_page *ftrace_pages_start;
1184 static struct ftrace_page *ftrace_pages;
1185
1186 static bool ftrace_hash_empty(struct ftrace_hash *hash)
1187 {
1188 return !hash || !hash->count;
1189 }
1190
1191 static struct ftrace_func_entry *
1192 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1193 {
1194 unsigned long key;
1195 struct ftrace_func_entry *entry;
1196 struct hlist_head *hhd;
1197
1198 if (ftrace_hash_empty(hash))
1199 return NULL;
1200
1201 if (hash->size_bits > 0)
1202 key = hash_long(ip, hash->size_bits);
1203 else
1204 key = 0;
1205
1206 hhd = &hash->buckets[key];
1207
1208 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1209 if (entry->ip == ip)
1210 return entry;
1211 }
1212 return NULL;
1213 }
1214
1215 static void __add_hash_entry(struct ftrace_hash *hash,
1216 struct ftrace_func_entry *entry)
1217 {
1218 struct hlist_head *hhd;
1219 unsigned long key;
1220
1221 if (hash->size_bits)
1222 key = hash_long(entry->ip, hash->size_bits);
1223 else
1224 key = 0;
1225
1226 hhd = &hash->buckets[key];
1227 hlist_add_head(&entry->hlist, hhd);
1228 hash->count++;
1229 }
1230
1231 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1232 {
1233 struct ftrace_func_entry *entry;
1234
1235 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1236 if (!entry)
1237 return -ENOMEM;
1238
1239 entry->ip = ip;
1240 __add_hash_entry(hash, entry);
1241
1242 return 0;
1243 }
1244
1245 static void
1246 free_hash_entry(struct ftrace_hash *hash,
1247 struct ftrace_func_entry *entry)
1248 {
1249 hlist_del(&entry->hlist);
1250 kfree(entry);
1251 hash->count--;
1252 }
1253
1254 static void
1255 remove_hash_entry(struct ftrace_hash *hash,
1256 struct ftrace_func_entry *entry)
1257 {
1258 hlist_del(&entry->hlist);
1259 hash->count--;
1260 }
1261
1262 static void ftrace_hash_clear(struct ftrace_hash *hash)
1263 {
1264 struct hlist_head *hhd;
1265 struct hlist_node *tn;
1266 struct ftrace_func_entry *entry;
1267 int size = 1 << hash->size_bits;
1268 int i;
1269
1270 if (!hash->count)
1271 return;
1272
1273 for (i = 0; i < size; i++) {
1274 hhd = &hash->buckets[i];
1275 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1276 free_hash_entry(hash, entry);
1277 }
1278 FTRACE_WARN_ON(hash->count);
1279 }
1280
1281 static void free_ftrace_hash(struct ftrace_hash *hash)
1282 {
1283 if (!hash || hash == EMPTY_HASH)
1284 return;
1285 ftrace_hash_clear(hash);
1286 kfree(hash->buckets);
1287 kfree(hash);
1288 }
1289
1290 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1291 {
1292 struct ftrace_hash *hash;
1293
1294 hash = container_of(rcu, struct ftrace_hash, rcu);
1295 free_ftrace_hash(hash);
1296 }
1297
1298 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1299 {
1300 if (!hash || hash == EMPTY_HASH)
1301 return;
1302 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1303 }
1304
1305 void ftrace_free_filter(struct ftrace_ops *ops)
1306 {
1307 ftrace_ops_init(ops);
1308 free_ftrace_hash(ops->filter_hash);
1309 free_ftrace_hash(ops->notrace_hash);
1310 }
1311
1312 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1313 {
1314 struct ftrace_hash *hash;
1315 int size;
1316
1317 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1318 if (!hash)
1319 return NULL;
1320
1321 size = 1 << size_bits;
1322 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1323
1324 if (!hash->buckets) {
1325 kfree(hash);
1326 return NULL;
1327 }
1328
1329 hash->size_bits = size_bits;
1330
1331 return hash;
1332 }
1333
1334 static struct ftrace_hash *
1335 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1336 {
1337 struct ftrace_func_entry *entry;
1338 struct ftrace_hash *new_hash;
1339 int size;
1340 int ret;
1341 int i;
1342
1343 new_hash = alloc_ftrace_hash(size_bits);
1344 if (!new_hash)
1345 return NULL;
1346
1347 /* Empty hash? */
1348 if (ftrace_hash_empty(hash))
1349 return new_hash;
1350
1351 size = 1 << hash->size_bits;
1352 for (i = 0; i < size; i++) {
1353 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1354 ret = add_hash_entry(new_hash, entry->ip);
1355 if (ret < 0)
1356 goto free_hash;
1357 }
1358 }
1359
1360 FTRACE_WARN_ON(new_hash->count != hash->count);
1361
1362 return new_hash;
1363
1364 free_hash:
1365 free_ftrace_hash(new_hash);
1366 return NULL;
1367 }
1368
1369 static void
1370 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1371 static void
1372 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1373
1374 static int
1375 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1376 struct ftrace_hash **dst, struct ftrace_hash *src)
1377 {
1378 struct ftrace_func_entry *entry;
1379 struct hlist_node *tn;
1380 struct hlist_head *hhd;
1381 struct ftrace_hash *old_hash;
1382 struct ftrace_hash *new_hash;
1383 int size = src->count;
1384 int bits = 0;
1385 int ret;
1386 int i;
1387
1388 /*
1389 * Remove the current set, update the hash and add
1390 * them back.
1391 */
1392 ftrace_hash_rec_disable(ops, enable);
1393
1394 /*
1395 * If the new source is empty, just free dst and assign it
1396 * the empty_hash.
1397 */
1398 if (!src->count) {
1399 free_ftrace_hash_rcu(*dst);
1400 rcu_assign_pointer(*dst, EMPTY_HASH);
1401 /* still need to update the function records */
1402 ret = 0;
1403 goto out;
1404 }
1405
1406 /*
1407 * Make the hash size about 1/2 the # found
1408 */
1409 for (size /= 2; size; size >>= 1)
1410 bits++;
1411
1412 /* Don't allocate too much */
1413 if (bits > FTRACE_HASH_MAX_BITS)
1414 bits = FTRACE_HASH_MAX_BITS;
1415
1416 ret = -ENOMEM;
1417 new_hash = alloc_ftrace_hash(bits);
1418 if (!new_hash)
1419 goto out;
1420
1421 size = 1 << src->size_bits;
1422 for (i = 0; i < size; i++) {
1423 hhd = &src->buckets[i];
1424 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1425 remove_hash_entry(src, entry);
1426 __add_hash_entry(new_hash, entry);
1427 }
1428 }
1429
1430 old_hash = *dst;
1431 rcu_assign_pointer(*dst, new_hash);
1432 free_ftrace_hash_rcu(old_hash);
1433
1434 ret = 0;
1435 out:
1436 /*
1437 * Enable regardless of ret:
1438 * On success, we enable the new hash.
1439 * On failure, we re-enable the original hash.
1440 */
1441 ftrace_hash_rec_enable(ops, enable);
1442
1443 return ret;
1444 }
1445
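/*
 * Example of the sizing loop in ftrace_hash_move() above: with
 * src->count == 100, size starts at 50 and is shifted right six times
 * before reaching zero, so bits == 6 and the new hash gets
 * 1 << 6 = 64 buckets -- roughly half the entry count, capped at
 * FTRACE_HASH_MAX_BITS.
 */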
1446 /*
1447 * Test the hashes for this ops to see if we want to call
1448 * the ops->func or not.
1449 *
1450 * It's a match if the ip is in the ops->filter_hash or
1451 * the filter_hash does not exist or is empty,
1452 * AND
1453 * the ip is not in the ops->notrace_hash.
1454 *
1455 * This needs to be called with preemption disabled as
1456 * the hashes are freed with call_rcu_sched().
1457 */
1458 static int
1459 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1460 {
1461 struct ftrace_hash *filter_hash;
1462 struct ftrace_hash *notrace_hash;
1463 int ret;
1464
1465 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1466 /*
1467 * There's a small race when adding ops: an ftrace handler
1468 * that wants regs may be called without them. We can not
1469 * allow that handler to be called if regs is NULL.
1470 */
1471 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1472 return 0;
1473 #endif
1474
1475 filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
1476 notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
1477
1478 if ((ftrace_hash_empty(filter_hash) ||
1479 ftrace_lookup_ip(filter_hash, ip)) &&
1480 (ftrace_hash_empty(notrace_hash) ||
1481 !ftrace_lookup_ip(notrace_hash, ip)))
1482 ret = 1;
1483 else
1484 ret = 0;
1485
1486 return ret;
1487 }
1488
1489 /*
1490 * This is a double for. Do not use 'break' to break out of the loop,
1491 * you must use a goto.
1492 */
1493 #define do_for_each_ftrace_rec(pg, rec) \
1494 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1495 int _____i; \
1496 for (_____i = 0; _____i < pg->index; _____i++) { \
1497 rec = &pg->records[_____i];
1498
1499 #define while_for_each_ftrace_rec() \
1500 } \
1501 }
1502
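/*
 * A sketch of the record walk (something() is just a stand-in; compare
 * __ftrace_hash_rec_update() and ftrace_replace_code() below). Note
 * the goto: 'break' would only exit the inner loop of the double for.
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (something(rec))
 *			goto out;
 *	} while_for_each_ftrace_rec();
 */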
1503
1504 static int ftrace_cmp_recs(const void *a, const void *b)
1505 {
1506 const struct dyn_ftrace *key = a;
1507 const struct dyn_ftrace *rec = b;
1508
1509 if (key->flags < rec->ip)
1510 return -1;
1511 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1512 return 1;
1513 return 0;
1514 }
1515
1516 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1517 {
1518 struct ftrace_page *pg;
1519 struct dyn_ftrace *rec;
1520 struct dyn_ftrace key;
1521
1522 key.ip = start;
1523 key.flags = end; /* overload flags, as it is unsigned long */
1524
1525 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1526 if (end < pg->records[0].ip ||
1527 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1528 continue;
1529 rec = bsearch(&key, pg->records, pg->index,
1530 sizeof(struct dyn_ftrace),
1531 ftrace_cmp_recs);
1532 if (rec)
1533 return rec->ip;
1534 }
1535
1536 return 0;
1537 }
1538
1539 /**
1540 * ftrace_location - return true if the ip given is a traced location
1541 * @ip: the instruction pointer to check
1542 *
1543 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1544 * That is, the instruction that is either a NOP or call to
1545 * the function tracer. It checks the ftrace internal tables to
1546 * determine if the address belongs or not.
1547 */
1548 unsigned long ftrace_location(unsigned long ip)
1549 {
1550 return ftrace_location_range(ip, ip);
1551 }
1552
1553 /**
1554 * ftrace_text_reserved - return true if range contains an ftrace location
1555 * @start: start of range to search
1556 * @end: end of range to search (inclusive). @end points to the last byte to check.
1557 *
1558 * Returns 1 if @start and @end contains a ftrace location.
1559 * That is, the instruction that is either a NOP or call to
1560 * the function tracer. It checks the ftrace internal tables to
1561 * determine if the address belongs or not.
1562 */
1563 int ftrace_text_reserved(void *start, void *end)
1564 {
1565 unsigned long ret;
1566
1567 ret = ftrace_location_range((unsigned long)start,
1568 (unsigned long)end);
1569
1570 return (int)!!ret;
1571 }
1572
1573 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1574 int filter_hash,
1575 bool inc)
1576 {
1577 struct ftrace_hash *hash;
1578 struct ftrace_hash *other_hash;
1579 struct ftrace_page *pg;
1580 struct dyn_ftrace *rec;
1581 int count = 0;
1582 int all = 0;
1583
1584 /* Only update if the ops has been registered */
1585 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1586 return;
1587
1588 /*
1589 * In the filter_hash case:
1590 * If the count is zero, we update all records.
1591 * Otherwise we just update the items in the hash.
1592 *
1593 * In the notrace_hash case:
1594 * We enable the update in the hash.
1595 * As disabling notrace means enabling the tracing,
1596 * and enabling notrace means disabling, the inc variable
1597 * gets inverted.
1598 */
1599 if (filter_hash) {
1600 hash = ops->filter_hash;
1601 other_hash = ops->notrace_hash;
1602 if (ftrace_hash_empty(hash))
1603 all = 1;
1604 } else {
1605 inc = !inc;
1606 hash = ops->notrace_hash;
1607 other_hash = ops->filter_hash;
1608 /*
1609 * If the notrace hash has no items,
1610 * then there's nothing to do.
1611 */
1612 if (ftrace_hash_empty(hash))
1613 return;
1614 }
1615
1616 do_for_each_ftrace_rec(pg, rec) {
1617 int in_other_hash = 0;
1618 int in_hash = 0;
1619 int match = 0;
1620
1621 if (all) {
1622 /*
1623 * Only the filter_hash affects all records.
1624 * Update if the record is not in the notrace hash.
1625 */
1626 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1627 match = 1;
1628 } else {
1629 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1630 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1631
1632 /* Filter hash: match ips in the hash but not in the notrace
1633 * hash. Notrace hash: match ips that are also in the filter
1634 * hash (or when the filter hash is empty). */
1635 if (filter_hash && in_hash && !in_other_hash)
1636 match = 1;
1637 else if (!filter_hash && in_hash &&
1638 (in_other_hash || ftrace_hash_empty(other_hash)))
1639 match = 1;
1640 }
1641 if (!match)
1642 continue;
1643
1644 if (inc) {
1645 rec->flags++;
1646 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1647 return;
1648 /*
1649 * If any ops wants regs saved for this function
1650 * then all ops will get saved regs.
1651 */
1652 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1653 rec->flags |= FTRACE_FL_REGS;
1654 } else {
1655 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1656 return;
1657 rec->flags--;
1658 }
1659 count++;
1660 /* Shortcut, if we handled all records, we are done. */
1661 if (!all && count == hash->count)
1662 return;
1663 } while_for_each_ftrace_rec();
1664 }
1665
1666 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1667 int filter_hash)
1668 {
1669 __ftrace_hash_rec_update(ops, filter_hash, 0);
1670 }
1671
1672 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1673 int filter_hash)
1674 {
1675 __ftrace_hash_rec_update(ops, filter_hash, 1);
1676 }
1677
1678 static void print_ip_ins(const char *fmt, unsigned char *p)
1679 {
1680 int i;
1681
1682 printk(KERN_CONT "%s", fmt);
1683
1684 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1685 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1686 }
1687
1688 /**
1689 * ftrace_bug - report and shutdown function tracer
1690 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1691 * @ip: The address that failed
1692 *
1693 * The arch code that enables or disables the function tracing
1694 * can call ftrace_bug() when it has detected a problem in
1695 * modifying the code. @failed should be one of either:
1696 * EFAULT - if the problem happens on reading the @ip address
1697 * EINVAL - if what is read at @ip is not what was expected
1698 * EPERM - if the problem happens on writing to the @ip address
1699 */
1700 void ftrace_bug(int failed, unsigned long ip)
1701 {
1702 switch (failed) {
1703 case -EFAULT:
1704 FTRACE_WARN_ON_ONCE(1);
1705 pr_info("ftrace faulted on modifying ");
1706 print_ip_sym(ip);
1707 break;
1708 case -EINVAL:
1709 FTRACE_WARN_ON_ONCE(1);
1710 pr_info("ftrace failed to modify ");
1711 print_ip_sym(ip);
1712 print_ip_ins(" actual: ", (unsigned char *)ip);
1713 printk(KERN_CONT "\n");
1714 break;
1715 case -EPERM:
1716 FTRACE_WARN_ON_ONCE(1);
1717 pr_info("ftrace faulted on writing ");
1718 print_ip_sym(ip);
1719 break;
1720 default:
1721 FTRACE_WARN_ON_ONCE(1);
1722 pr_info("ftrace faulted on unknown error ");
1723 print_ip_sym(ip);
1724 }
1725 }
1726
1727 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1728 {
1729 unsigned long flag = 0UL;
1730
1731 /*
1732 * If we are updating calls:
1733 *
1734 * If the record has a ref count, then we need to enable it
1735 * because someone is using it.
1736 *
1737 * Otherwise we make sure its disabled.
1738 *
1739 * If we are disabling calls, then disable all records that
1740 * are enabled.
1741 */
1742 if (enable && (rec->flags & ~FTRACE_FL_MASK))
1743 flag = FTRACE_FL_ENABLED;
1744
1745 /*
1746 * If enabling and the REGS flag does not match the REGS_EN, then
1747 * do not ignore this record. Set flags to fail the compare against
1748 * ENABLED.
1749 */
1750 if (flag &&
1751 (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1752 flag |= FTRACE_FL_REGS;
1753
1754 /* If the state of this record hasn't changed, then do nothing */
1755 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1756 return FTRACE_UPDATE_IGNORE;
1757
1758 if (flag) {
1759 /* Save off if rec is being enabled (for return value) */
1760 flag ^= rec->flags & FTRACE_FL_ENABLED;
1761
1762 if (update) {
1763 rec->flags |= FTRACE_FL_ENABLED;
1764 if (flag & FTRACE_FL_REGS) {
1765 if (rec->flags & FTRACE_FL_REGS)
1766 rec->flags |= FTRACE_FL_REGS_EN;
1767 else
1768 rec->flags &= ~FTRACE_FL_REGS_EN;
1769 }
1770 }
1771
1772 /*
1773 * If this record is being updated from a nop, then
1774 * return UPDATE_MAKE_CALL.
1775 * Otherwise, if the EN flag is set, then return
1776 * UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1777 * from the non-save regs, to a save regs function.
1778 * Otherwise,
1779 * return UPDATE_MODIFY_CALL to tell the caller to convert
1780 * from the save regs, to a non-save regs function.
1781 */
1782 if (flag & FTRACE_FL_ENABLED)
1783 return FTRACE_UPDATE_MAKE_CALL;
1784 else if (rec->flags & FTRACE_FL_REGS_EN)
1785 return FTRACE_UPDATE_MODIFY_CALL_REGS;
1786 else
1787 return FTRACE_UPDATE_MODIFY_CALL;
1788 }
1789
1790 if (update) {
1791 /* If there's no more users, clear all flags */
1792 if (!(rec->flags & ~FTRACE_FL_MASK))
1793 rec->flags = 0;
1794 else
1795 /* Just disable the record (keep REGS state) */
1796 rec->flags &= ~FTRACE_FL_ENABLED;
1797 }
1798
1799 return FTRACE_UPDATE_MAKE_NOP;
1800 }
1801
1802 /**
1803 * ftrace_update_record, set a record that now is tracing or not
1804 * @rec: the record to update
1805 * @enable: set to 1 if the record is tracing, zero to force disable
1806 *
1807 * The records that represent all functions that can be traced need
1808 * to be updated when tracing has been enabled.
1809 */
1810 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1811 {
1812 return ftrace_check_record(rec, enable, 1);
1813 }
1814
1815 /**
1816 * ftrace_test_record, check if the record has been enabled or not
1817 * @rec: the record to test
1818 * @enable: set to 1 to check if enabled, 0 if it is disabled
1819 *
1820 * The arch code may need to test if a record is already set to
1821 * tracing to determine how to modify the function code that it
1822 * represents.
1823 */
1824 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1825 {
1826 return ftrace_check_record(rec, enable, 0);
1827 }
1828
1829 static int
1830 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1831 {
1832 unsigned long ftrace_old_addr;
1833 unsigned long ftrace_addr;
1834 int ret;
1835
1836 ret = ftrace_update_record(rec, enable);
1837
1838 if (rec->flags & FTRACE_FL_REGS)
1839 ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1840 else
1841 ftrace_addr = (unsigned long)FTRACE_ADDR;
1842
1843 switch (ret) {
1844 case FTRACE_UPDATE_IGNORE:
1845 return 0;
1846
1847 case FTRACE_UPDATE_MAKE_CALL:
1848 return ftrace_make_call(rec, ftrace_addr);
1849
1850 case FTRACE_UPDATE_MAKE_NOP:
1851 return ftrace_make_nop(NULL, rec, ftrace_addr);
1852
1853 case FTRACE_UPDATE_MODIFY_CALL_REGS:
1854 case FTRACE_UPDATE_MODIFY_CALL:
1855 if (rec->flags & FTRACE_FL_REGS)
1856 ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1857 else
1858 ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1859
1860 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1861 }
1862
1863 return -1; /* unknown ftrace bug */
1864 }
1865
1866 void __weak ftrace_replace_code(int enable)
1867 {
1868 struct dyn_ftrace *rec;
1869 struct ftrace_page *pg;
1870 int failed;
1871
1872 if (unlikely(ftrace_disabled))
1873 return;
1874
1875 do_for_each_ftrace_rec(pg, rec) {
1876 failed = __ftrace_replace_code(rec, enable);
1877 if (failed) {
1878 ftrace_bug(failed, rec->ip);
1879 /* Stop processing */
1880 return;
1881 }
1882 } while_for_each_ftrace_rec();
1883 }
1884
1885 struct ftrace_rec_iter {
1886 struct ftrace_page *pg;
1887 int index;
1888 };
1889
1890 /**
1891 * ftrace_rec_iter_start, start up iterating over traced functions
1892 *
1893 * Returns an iterator handle that is used to iterate over all
1894 * the records that represent address locations where functions
1895 * are traced.
1896 *
1897 * May return NULL if no records are available.
1898 */
1899 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1900 {
1901 /*
1902 * We only use a single iterator.
1903 * Protected by the ftrace_lock mutex.
1904 */
1905 static struct ftrace_rec_iter ftrace_rec_iter;
1906 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1907
1908 iter->pg = ftrace_pages_start;
1909 iter->index = 0;
1910
1911 /* Could have empty pages */
1912 while (iter->pg && !iter->pg->index)
1913 iter->pg = iter->pg->next;
1914
1915 if (!iter->pg)
1916 return NULL;
1917
1918 return iter;
1919 }
1920
1921 /**
1922 * ftrace_rec_iter_next, get the next record to process.
1923 * @iter: The handle to the iterator.
1924 *
1925 * Returns the next iterator after the given iterator @iter.
1926 */
1927 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1928 {
1929 iter->index++;
1930
1931 if (iter->index >= iter->pg->index) {
1932 iter->pg = iter->pg->next;
1933 iter->index = 0;
1934
1935 /* Could have empty pages */
1936 while (iter->pg && !iter->pg->index)
1937 iter->pg = iter->pg->next;
1938 }
1939
1940 if (!iter->pg)
1941 return NULL;
1942
1943 return iter;
1944 }
1945
1946 /**
1947 * ftrace_rec_iter_record, get the record at the iterator location
1948 * @iter: The current iterator location
1949 *
1950 * Returns the record that the current @iter is at.
1951 */
1952 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1953 {
1954 return &iter->pg->records[iter->index];
1955 }
1956
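/*
 * The three helpers above are meant to be used together, roughly:
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		...
 *	}
 *
 * (a sketch of how arch code can visit every dyn_ftrace record).
 */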
1957 static int
1958 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1959 {
1960 unsigned long ip;
1961 int ret;
1962
1963 ip = rec->ip;
1964
1965 if (unlikely(ftrace_disabled))
1966 return 0;
1967
1968 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1969 if (ret) {
1970 ftrace_bug(ret, ip);
1971 return 0;
1972 }
1973 return 1;
1974 }
1975
1976 /*
1977 * archs can override this function if they must do something
1978 * before the code modification is performed.
1979 */
1980 int __weak ftrace_arch_code_modify_prepare(void)
1981 {
1982 return 0;
1983 }
1984
1985 /*
1986 * archs can override this function if they must do something
1987 * after the code modification is performed.
1988 */
1989 int __weak ftrace_arch_code_modify_post_process(void)
1990 {
1991 return 0;
1992 }
1993
1994 void ftrace_modify_all_code(int command)
1995 {
1996 int update = command & FTRACE_UPDATE_TRACE_FUNC;
1997
1998 /*
1999 * If the ftrace_caller calls a ftrace_ops func directly,
2000 * we need to make sure that it only traces functions it
2001 * expects to trace. When doing the switch of functions,
2002 * we need to update to the ftrace_ops_list_func first
2003 * before the transition between old and new calls are set,
2004 * as the ftrace_ops_list_func will check the ops hashes
2005 * to make sure the ops are having the right functions
2006 * traced.
2007 */
2008 if (update)
2009 ftrace_update_ftrace_func(ftrace_ops_list_func);
2010
2011 if (command & FTRACE_UPDATE_CALLS)
2012 ftrace_replace_code(1);
2013 else if (command & FTRACE_DISABLE_CALLS)
2014 ftrace_replace_code(0);
2015
2016 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2017 function_trace_op = set_function_trace_op;
2018 smp_wmb();
2019 /* If irqs are disabled, we are in stop machine */
2020 if (!irqs_disabled())
2021 smp_call_function(ftrace_sync_ipi, NULL, 1);
2022 ftrace_update_ftrace_func(ftrace_trace_function);
2023 }
2024
2025 if (command & FTRACE_START_FUNC_RET)
2026 ftrace_enable_ftrace_graph_caller();
2027 else if (command & FTRACE_STOP_FUNC_RET)
2028 ftrace_disable_ftrace_graph_caller();
2029 }
2030
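/*
 * In short, ftrace_modify_all_code() orders things as: first point the
 * trampoline at ftrace_ops_list_func (which re-checks the ops hashes
 * itself), then patch the call sites for UPDATE_CALLS/DISABLE_CALLS,
 * then publish the new function_trace_op (wmb plus an IPI for the rmb)
 * and install the real function, and finally flip the graph caller on
 * or off as requested.
 */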
2031 static int __ftrace_modify_code(void *data)
2032 {
2033 int *command = data;
2034
2035 ftrace_modify_all_code(*command);
2036
2037 return 0;
2038 }
2039
2040 /**
2041 * ftrace_run_stop_machine, go back to the stop machine method
2042 * @command: The command to tell ftrace what to do
2043 *
2044 * If an arch needs to fall back to the stop machine method, then
2045 * it can call this function.
2046 */
2047 void ftrace_run_stop_machine(int command)
2048 {
2049 stop_machine(__ftrace_modify_code, &command, NULL);
2050 }
2051
2052 /**
2053 * arch_ftrace_update_code, modify the code to trace or not trace
2054 * @command: The command that needs to be done
2055 *
2056 * Archs can override this function if they do not need to
2057 * run stop_machine() to modify code.
2058 */
2059 void __weak arch_ftrace_update_code(int command)
2060 {
2061 ftrace_run_stop_machine(command);
2062 }
2063
2064 static void ftrace_run_update_code(int command)
2065 {
2066 int ret;
2067
2068 ret = ftrace_arch_code_modify_prepare();
2069 FTRACE_WARN_ON(ret);
2070 if (ret)
2071 return;
2072 /*
2073 * Do not call function tracer while we update the code.
2074 * We are in stop machine.
2075 */
2076 function_trace_stop++;
2077
2078 /*
2079 * By default we use stop_machine() to modify the code.
2080 * But archs can do whatever they want as long as it
2081 * is safe. The stop_machine() is the safest, but also
2082 * produces the most overhead.
2083 */
2084 arch_ftrace_update_code(command);
2085
2086 function_trace_stop--;
2087
2088 ret = ftrace_arch_code_modify_post_process();
2089 FTRACE_WARN_ON(ret);
2090 }
2091
2092 static ftrace_func_t saved_ftrace_func;
2093 static int ftrace_start_up;
2094 static int global_start_up;
2095
2096 static void ftrace_startup_enable(int command)
2097 {
2098 if (saved_ftrace_func != ftrace_trace_function) {
2099 saved_ftrace_func = ftrace_trace_function;
2100 command |= FTRACE_UPDATE_TRACE_FUNC;
2101 }
2102
2103 if (!command || !ftrace_enabled)
2104 return;
2105
2106 ftrace_run_update_code(command);
2107 }
2108
2109 static int ftrace_startup(struct ftrace_ops *ops, int command)
2110 {
2111 bool hash_enable = true;
2112 int ret;
2113
2114 if (unlikely(ftrace_disabled))
2115 return -ENODEV;
2116
2117 ret = __register_ftrace_function(ops);
2118 if (ret)
2119 return ret;
2120
2121 ftrace_start_up++;
2122 command |= FTRACE_UPDATE_CALLS;
2123
2124 /* ops marked global share the filter hashes */
2125 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2126 ops = &global_ops;
2127 /* Don't update hash if global is already set */
2128 if (global_start_up)
2129 hash_enable = false;
2130 global_start_up++;
2131 }
2132
2133 ops->flags |= FTRACE_OPS_FL_ENABLED;
2134 if (hash_enable)
2135 ftrace_hash_rec_enable(ops, 1);
2136
2137 ftrace_startup_enable(command);
2138
2139 return 0;
2140 }
2141
2142 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2143 {
2144 bool hash_disable = true;
2145 int ret;
2146
2147 if (unlikely(ftrace_disabled))
2148 return -ENODEV;
2149
2150 ret = __unregister_ftrace_function(ops);
2151 if (ret)
2152 return ret;
2153
2154 ftrace_start_up--;
2155 /*
2156 * Just warn in case of imbalance; there is no need to kill ftrace.
2157 * It is not critical, but the ftrace_call callers may never be
2158 * nopped again after further ftrace uses.
2159 */
2160 WARN_ON_ONCE(ftrace_start_up < 0);
2161
2162 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2163 ops = &global_ops;
2164 global_start_up--;
2165 WARN_ON_ONCE(global_start_up < 0);
2166 /* Don't update hash if global still has users */
2167 if (global_start_up) {
2168 WARN_ON_ONCE(!ftrace_start_up);
2169 hash_disable = false;
2170 }
2171 }
2172
2173 if (hash_disable)
2174 ftrace_hash_rec_disable(ops, 1);
2175
2176 if (ops != &global_ops || !global_start_up)
2177 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2178
2179 command |= FTRACE_UPDATE_CALLS;
2180
2181 if (saved_ftrace_func != ftrace_trace_function) {
2182 saved_ftrace_func = ftrace_trace_function;
2183 command |= FTRACE_UPDATE_TRACE_FUNC;
2184 }
2185
2186 if (!command || !ftrace_enabled) {
2187 /*
2188 * If these are control ops, they still need their
2189 * per_cpu field freed. Since function tracing is
2190 * not currently active, we can just free them
2191 * without synchronizing all CPUs.
2192 */
2193 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2194 control_ops_free(ops);
2195 return 0;
2196 }
2197
2198 ftrace_run_update_code(command);
2199
2200 /*
2201 * Dynamic ops may be freed; we must make sure that all
2202 * callers are done before leaving this function.
2203 * The same goes for freeing the per_cpu data of the control
2204 * ops.
2205 *
2206 * Again, normal synchronize_sched() is not good enough.
2207 * We need to do a hard force of sched synchronization.
2208 * This is because we use preempt_disable() to do RCU, but
2209 * the function tracers can be called where RCU is not watching
2210 * (like before user_exit()). We can not rely on the RCU
2211 * infrastructure to do the synchronization, thus we must do it
2212 * ourselves.
2213 */
2214 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2215 schedule_on_each_cpu(ftrace_sync);
2216
2217 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2218 control_ops_free(ops);
2219 }
2220
2221 return 0;
2222 }
2223
2224 static void ftrace_startup_sysctl(void)
2225 {
2226 if (unlikely(ftrace_disabled))
2227 return;
2228
2229 /* Force update next time */
2230 saved_ftrace_func = NULL;
2231 /* ftrace_start_up is true if we want ftrace running */
2232 if (ftrace_start_up)
2233 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2234 }
2235
2236 static void ftrace_shutdown_sysctl(void)
2237 {
2238 if (unlikely(ftrace_disabled))
2239 return;
2240
2241 /* ftrace_start_up is true if ftrace is running */
2242 if (ftrace_start_up)
2243 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2244 }
2245
2246 static cycle_t ftrace_update_time;
2247 unsigned long ftrace_update_tot_cnt;
2248
2249 static inline int ops_traces_mod(struct ftrace_ops *ops)
2250 {
2251 /*
2252 * An empty filter_hash defaults to tracing the module's functions.
2253 * But the notrace hash requires a test of individual module functions.
2254 */
2255 return ftrace_hash_empty(ops->filter_hash) &&
2256 ftrace_hash_empty(ops->notrace_hash);
2257 }
2258
2259 /*
2260 * Check if the current ops references the record.
2261 *
2262 * If the ops traces all functions, then it was already accounted for.
2263 * If the ops does not trace the current record function, skip it.
2264 * If the ops ignores the function via notrace filter, skip it.
2265 */
2266 static inline bool
2267 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2268 {
2269 /* If ops isn't enabled, ignore it */
2270 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2271 return 0;
2272
2273 /* If ops traces all mods, we already accounted for it */
2274 if (ops_traces_mod(ops))
2275 return 0;
2276
2277 /* The function must be in the filter */
2278 if (!ftrace_hash_empty(ops->filter_hash) &&
2279 !ftrace_lookup_ip(ops->filter_hash, rec->ip))
2280 return 0;
2281
2282 /* If in notrace hash, we ignore it too */
2283 if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
2284 return 0;
2285
2286 return 1;
2287 }
2288
2289 static int referenced_filters(struct dyn_ftrace *rec)
2290 {
2291 struct ftrace_ops *ops;
2292 int cnt = 0;
2293
2294 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2295 if (ops_references_rec(ops, rec))
2296 cnt++;
2297 }
2298
2299 return cnt;
2300 }
2301
2302 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2303 {
2304 struct ftrace_page *pg;
2305 struct dyn_ftrace *p;
2306 cycle_t start, stop;
2307 unsigned long update_cnt = 0;
2308 unsigned long ref = 0;
2309 bool test = false;
2310 int i;
2311
2312 /*
2313 * When adding a module, we need to check if tracers are
2314 * currently enabled and if they are set to trace all functions.
2315 * If they are, we need to enable the module functions as well
2316 * as update the reference counts for those function records.
2317 */
2318 if (mod) {
2319 struct ftrace_ops *ops;
2320
2321 for (ops = ftrace_ops_list;
2322 ops != &ftrace_list_end; ops = ops->next) {
2323 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2324 if (ops_traces_mod(ops))
2325 ref++;
2326 else
2327 test = true;
2328 }
2329 }
2330 }
2331
2332 start = ftrace_now(raw_smp_processor_id());
2333
2334 for (pg = new_pgs; pg; pg = pg->next) {
2335
2336 for (i = 0; i < pg->index; i++) {
2337 int cnt = ref;
2338
2339 /* If something went wrong, bail without enabling anything */
2340 if (unlikely(ftrace_disabled))
2341 return -1;
2342
2343 p = &pg->records[i];
2344 if (test)
2345 cnt += referenced_filters(p);
2346 p->flags = cnt;
2347
2348 /*
2349 * Do the initial record conversion from mcount jump
2350 * to the NOP instructions.
2351 */
2352 if (!ftrace_code_disable(mod, p))
2353 break;
2354
2355 update_cnt++;
2356
2357 /*
2358 * If the tracing is enabled, go ahead and enable the record.
2359 *
2360 * The reason not to enable the record immediately is the
2361 * inherent check of ftrace_make_nop/ftrace_make_call for
2362 * correct previous instructions. Doing the NOP conversion
2363 * first puts the module into the correct state, thus
2364 * passing the ftrace_make_call check.
2365 */
2366 if (ftrace_start_up && cnt) {
2367 int failed = __ftrace_replace_code(p, 1);
2368 if (failed)
2369 ftrace_bug(failed, p->ip);
2370 }
2371 }
2372 }
2373
2374 stop = ftrace_now(raw_smp_processor_id());
2375 ftrace_update_time = stop - start;
2376 ftrace_update_tot_cnt += update_cnt;
2377
2378 return 0;
2379 }
2380
2381 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2382 {
2383 int order;
2384 int cnt;
2385
2386 if (WARN_ON(!count))
2387 return -EINVAL;
2388
2389 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2390
2391 /*
2392 * We want to fill as much as possible. No more than a page
2393 * may be empty.
2394 */
2395 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2396 order--;
2397
2398 again:
2399 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2400
2401 if (!pg->records) {
2402 /* if we can't allocate this size, try something smaller */
2403 if (!order)
2404 return -ENOMEM;
2405 order >>= 1;
2406 goto again;
2407 }
2408
2409 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2410 pg->size = cnt;
2411
2412 if (cnt > count)
2413 cnt = count;
2414
2415 return cnt;
2416 }
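/*
 * Worked example (illustrative only; the concrete numbers are assumptions,
 * not taken from this file): with a 4096 byte PAGE_SIZE and an ENTRY_SIZE
 * (sizeof(struct dyn_ftrace)) of 16 bytes, ENTRIES_PER_PAGE would be 256.
 * For count = 1000 the code above computes:
 *
 *	order = get_count_order(DIV_ROUND_UP(1000, 256))
 *	      = get_count_order(4) = 2
 *	(PAGE_SIZE << 2) / ENTRY_SIZE = 1024 entries
 *	1024 >= 1000 + 256 is false, so order stays at 2
 *
 * Four pages then hold up to 1024 records, leaving less than one page of
 * slack, which is exactly what the shrink loop is checking for.
 */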
2417
2418 static struct ftrace_page *
2419 ftrace_allocate_pages(unsigned long num_to_init)
2420 {
2421 struct ftrace_page *start_pg;
2422 struct ftrace_page *pg;
2423 int order;
2424 int cnt;
2425
2426 if (!num_to_init)
2427 return NULL;
2428
2429 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2430 if (!pg)
2431 return NULL;
2432
2433 /*
2434 * Try to allocate as much as possible in one contiguous
2435 * location that fills in all of the space. We want to
2436 * waste as little space as possible.
2437 */
2438 for (;;) {
2439 cnt = ftrace_allocate_records(pg, num_to_init);
2440 if (cnt < 0)
2441 goto free_pages;
2442
2443 num_to_init -= cnt;
2444 if (!num_to_init)
2445 break;
2446
2447 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2448 if (!pg->next)
2449 goto free_pages;
2450
2451 pg = pg->next;
2452 }
2453
2454 return start_pg;
2455
2456 free_pages:
2457 while (start_pg) {
2458 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2459 free_pages((unsigned long)pg->records, order);
2460 start_pg = pg->next;
2461 kfree(pg);
2462 pg = start_pg;
2463 }
2464 pr_info("ftrace: FAILED to allocate memory for functions\n");
2465 return NULL;
2466 }
2467
2468 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2469 {
2470 int cnt;
2471
2472 if (!num_to_init) {
2473 pr_info("ftrace: No functions to be traced?\n");
2474 return -1;
2475 }
2476
2477 cnt = num_to_init / ENTRIES_PER_PAGE;
2478 pr_info("ftrace: allocating %ld entries in %d pages\n",
2479 num_to_init, cnt + 1);
2480
2481 return 0;
2482 }
2483
2484 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2485
2486 struct ftrace_iterator {
2487 loff_t pos;
2488 loff_t func_pos;
2489 struct ftrace_page *pg;
2490 struct dyn_ftrace *func;
2491 struct ftrace_func_probe *probe;
2492 struct trace_parser parser;
2493 struct ftrace_hash *hash;
2494 struct ftrace_ops *ops;
2495 int hidx;
2496 int idx;
2497 unsigned flags;
2498 };
2499
2500 static void *
2501 t_hash_next(struct seq_file *m, loff_t *pos)
2502 {
2503 struct ftrace_iterator *iter = m->private;
2504 struct hlist_node *hnd = NULL;
2505 struct hlist_head *hhd;
2506
2507 (*pos)++;
2508 iter->pos = *pos;
2509
2510 if (iter->probe)
2511 hnd = &iter->probe->node;
2512 retry:
2513 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2514 return NULL;
2515
2516 hhd = &ftrace_func_hash[iter->hidx];
2517
2518 if (hlist_empty(hhd)) {
2519 iter->hidx++;
2520 hnd = NULL;
2521 goto retry;
2522 }
2523
2524 if (!hnd)
2525 hnd = hhd->first;
2526 else {
2527 hnd = hnd->next;
2528 if (!hnd) {
2529 iter->hidx++;
2530 goto retry;
2531 }
2532 }
2533
2534 if (WARN_ON_ONCE(!hnd))
2535 return NULL;
2536
2537 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2538
2539 return iter;
2540 }
2541
2542 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2543 {
2544 struct ftrace_iterator *iter = m->private;
2545 void *p = NULL;
2546 loff_t l;
2547
2548 if (!(iter->flags & FTRACE_ITER_DO_HASH))
2549 return NULL;
2550
2551 if (iter->func_pos > *pos)
2552 return NULL;
2553
2554 iter->hidx = 0;
2555 for (l = 0; l <= (*pos - iter->func_pos); ) {
2556 p = t_hash_next(m, &l);
2557 if (!p)
2558 break;
2559 }
2560 if (!p)
2561 return NULL;
2562
2563 /* Only set this if we have an item */
2564 iter->flags |= FTRACE_ITER_HASH;
2565
2566 return iter;
2567 }
2568
2569 static int
2570 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2571 {
2572 struct ftrace_func_probe *rec;
2573
2574 rec = iter->probe;
2575 if (WARN_ON_ONCE(!rec))
2576 return -EIO;
2577
2578 if (rec->ops->print)
2579 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2580
2581 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2582
2583 if (rec->data)
2584 seq_printf(m, ":%p", rec->data);
2585 seq_putc(m, '\n');
2586
2587 return 0;
2588 }
2589
2590 static void *
2591 t_next(struct seq_file *m, void *v, loff_t *pos)
2592 {
2593 struct ftrace_iterator *iter = m->private;
2594 struct ftrace_ops *ops = iter->ops;
2595 struct dyn_ftrace *rec = NULL;
2596
2597 if (unlikely(ftrace_disabled))
2598 return NULL;
2599
2600 if (iter->flags & FTRACE_ITER_HASH)
2601 return t_hash_next(m, pos);
2602
2603 (*pos)++;
2604 iter->pos = iter->func_pos = *pos;
2605
2606 if (iter->flags & FTRACE_ITER_PRINTALL)
2607 return t_hash_start(m, pos);
2608
2609 retry:
2610 if (iter->idx >= iter->pg->index) {
2611 if (iter->pg->next) {
2612 iter->pg = iter->pg->next;
2613 iter->idx = 0;
2614 goto retry;
2615 }
2616 } else {
2617 rec = &iter->pg->records[iter->idx++];
2618 if (((iter->flags & FTRACE_ITER_FILTER) &&
2619 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2620
2621 ((iter->flags & FTRACE_ITER_NOTRACE) &&
2622 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2623
2624 ((iter->flags & FTRACE_ITER_ENABLED) &&
2625 !(rec->flags & FTRACE_FL_ENABLED))) {
2626
2627 rec = NULL;
2628 goto retry;
2629 }
2630 }
2631
2632 if (!rec)
2633 return t_hash_start(m, pos);
2634
2635 iter->func = rec;
2636
2637 return iter;
2638 }
2639
2640 static void reset_iter_read(struct ftrace_iterator *iter)
2641 {
2642 iter->pos = 0;
2643 iter->func_pos = 0;
2644 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2645 }
2646
2647 static void *t_start(struct seq_file *m, loff_t *pos)
2648 {
2649 struct ftrace_iterator *iter = m->private;
2650 struct ftrace_ops *ops = iter->ops;
2651 void *p = NULL;
2652 loff_t l;
2653
2654 mutex_lock(&ftrace_lock);
2655
2656 if (unlikely(ftrace_disabled))
2657 return NULL;
2658
2659 /*
2660 * If an lseek was done, then reset and start from the beginning.
2661 */
2662 if (*pos < iter->pos)
2663 reset_iter_read(iter);
2664
2665 /*
2666 * For set_ftrace_filter reading, if we have the filter
2667 * off, we can short-circuit and just print out that all
2668 * functions are enabled.
2669 */
2670 if (iter->flags & FTRACE_ITER_FILTER &&
2671 ftrace_hash_empty(ops->filter_hash)) {
2672 if (*pos > 0)
2673 return t_hash_start(m, pos);
2674 iter->flags |= FTRACE_ITER_PRINTALL;
2675 /* reset in case of seek/pread */
2676 iter->flags &= ~FTRACE_ITER_HASH;
2677 return iter;
2678 }
2679
2680 if (iter->flags & FTRACE_ITER_HASH)
2681 return t_hash_start(m, pos);
2682
2683 /*
2684 * Unfortunately, we need to restart at ftrace_pages_start
2685 * every time we let go of the ftrace_lock. This is because
2686 * those pointers can change without the lock.
2687 */
2688 iter->pg = ftrace_pages_start;
2689 iter->idx = 0;
2690 for (l = 0; l <= *pos; ) {
2691 p = t_next(m, p, &l);
2692 if (!p)
2693 break;
2694 }
2695
2696 if (!p)
2697 return t_hash_start(m, pos);
2698
2699 return iter;
2700 }
2701
2702 static void t_stop(struct seq_file *m, void *p)
2703 {
2704 mutex_unlock(&ftrace_lock);
2705 }
2706
2707 static int t_show(struct seq_file *m, void *v)
2708 {
2709 struct ftrace_iterator *iter = m->private;
2710 struct dyn_ftrace *rec;
2711
2712 if (iter->flags & FTRACE_ITER_HASH)
2713 return t_hash_show(m, iter);
2714
2715 if (iter->flags & FTRACE_ITER_PRINTALL) {
2716 seq_printf(m, "#### all functions enabled ####\n");
2717 return 0;
2718 }
2719
2720 rec = iter->func;
2721
2722 if (!rec)
2723 return 0;
2724
2725 seq_printf(m, "%ps", (void *)rec->ip);
2726 if (iter->flags & FTRACE_ITER_ENABLED)
2727 seq_printf(m, " (%ld)%s",
2728 rec->flags & ~FTRACE_FL_MASK,
2729 rec->flags & FTRACE_FL_REGS ? " R" : "");
2730 seq_printf(m, "\n");
2731
2732 return 0;
2733 }
2734
2735 static const struct seq_operations show_ftrace_seq_ops = {
2736 .start = t_start,
2737 .next = t_next,
2738 .stop = t_stop,
2739 .show = t_show,
2740 };
2741
2742 static int
2743 ftrace_avail_open(struct inode *inode, struct file *file)
2744 {
2745 struct ftrace_iterator *iter;
2746
2747 if (unlikely(ftrace_disabled))
2748 return -ENODEV;
2749
2750 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2751 if (iter) {
2752 iter->pg = ftrace_pages_start;
2753 iter->ops = &global_ops;
2754 }
2755
2756 return iter ? 0 : -ENOMEM;
2757 }
2758
2759 static int
2760 ftrace_enabled_open(struct inode *inode, struct file *file)
2761 {
2762 struct ftrace_iterator *iter;
2763
2764 if (unlikely(ftrace_disabled))
2765 return -ENODEV;
2766
2767 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2768 if (iter) {
2769 iter->pg = ftrace_pages_start;
2770 iter->flags = FTRACE_ITER_ENABLED;
2771 iter->ops = &global_ops;
2772 }
2773
2774 return iter ? 0 : -ENOMEM;
2775 }
2776
2777 static void ftrace_filter_reset(struct ftrace_hash *hash)
2778 {
2779 mutex_lock(&ftrace_lock);
2780 ftrace_hash_clear(hash);
2781 mutex_unlock(&ftrace_lock);
2782 }
2783
2784 /**
2785 * ftrace_regex_open - initialize function tracer filter files
2786 * @ops: The ftrace_ops that hold the hash filters
2787 * @flag: The type of filter to process
2788 * @inode: The inode, usually passed in to your open routine
2789 * @file: The file, usually passed in to your open routine
2790 *
2791 * ftrace_regex_open() initializes the filter files for the
2792 * @ops. Depending on @flag it may process the filter hash or
2793 * the notrace hash of @ops. With this called from the open
2794 * routine, you can use ftrace_filter_write() for the write
2795 * routine if @flag has FTRACE_ITER_FILTER set, or
2796 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2797 * tracing_lseek() should be used as the lseek routine, and
2798 * release must call ftrace_regex_release().
2799 */
2800 int
2801 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2802 struct inode *inode, struct file *file)
2803 {
2804 struct ftrace_iterator *iter;
2805 struct ftrace_hash *hash;
2806 int ret = 0;
2807
2808 ftrace_ops_init(ops);
2809
2810 if (unlikely(ftrace_disabled))
2811 return -ENODEV;
2812
2813 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2814 if (!iter)
2815 return -ENOMEM;
2816
2817 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2818 kfree(iter);
2819 return -ENOMEM;
2820 }
2821
2822 iter->ops = ops;
2823 iter->flags = flag;
2824
2825 mutex_lock(&ops->regex_lock);
2826
2827 if (flag & FTRACE_ITER_NOTRACE)
2828 hash = ops->notrace_hash;
2829 else
2830 hash = ops->filter_hash;
2831
2832 if (file->f_mode & FMODE_WRITE) {
2833 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2834 if (!iter->hash) {
2835 trace_parser_put(&iter->parser);
2836 kfree(iter);
2837 ret = -ENOMEM;
2838 goto out_unlock;
2839 }
2840 }
2841
2842 if ((file->f_mode & FMODE_WRITE) &&
2843 (file->f_flags & O_TRUNC))
2844 ftrace_filter_reset(iter->hash);
2845
2846 if (file->f_mode & FMODE_READ) {
2847 iter->pg = ftrace_pages_start;
2848
2849 ret = seq_open(file, &show_ftrace_seq_ops);
2850 if (!ret) {
2851 struct seq_file *m = file->private_data;
2852 m->private = iter;
2853 } else {
2854 /* Failed */
2855 free_ftrace_hash(iter->hash);
2856 trace_parser_put(&iter->parser);
2857 kfree(iter);
2858 }
2859 } else
2860 file->private_data = iter;
2861
2862 out_unlock:
2863 mutex_unlock(&ops->regex_lock);
2864
2865 return ret;
2866 }
2867
2868 static int
2869 ftrace_filter_open(struct inode *inode, struct file *file)
2870 {
2871 struct ftrace_ops *ops = inode->i_private;
2872
2873 return ftrace_regex_open(ops,
2874 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2875 inode, file);
2876 }
2877
2878 static int
2879 ftrace_notrace_open(struct inode *inode, struct file *file)
2880 {
2881 struct ftrace_ops *ops = inode->i_private;
2882
2883 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
2884 inode, file);
2885 }
2886
2887 static int ftrace_match(char *str, char *regex, int len, int type)
2888 {
2889 int matched = 0;
2890 int slen;
2891
2892 switch (type) {
2893 case MATCH_FULL:
2894 if (strcmp(str, regex) == 0)
2895 matched = 1;
2896 break;
2897 case MATCH_FRONT_ONLY:
2898 if (strncmp(str, regex, len) == 0)
2899 matched = 1;
2900 break;
2901 case MATCH_MIDDLE_ONLY:
2902 if (strstr(str, regex))
2903 matched = 1;
2904 break;
2905 case MATCH_END_ONLY:
2906 slen = strlen(str);
2907 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2908 matched = 1;
2909 break;
2910 }
2911
2912 return matched;
2913 }
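/*
 * Quick reference (added for illustration; the glob parsing itself is done
 * by filter_parse_regex(), not here): a pattern such as "sched*" is
 * expected to arrive as MATCH_FRONT_ONLY with regex "sched", "*lock" as
 * MATCH_END_ONLY with "lock", "*rcu*" as MATCH_MIDDLE_ONLY with "rcu",
 * and a plain "schedule" as MATCH_FULL.
 */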
2914
2915 static int
2916 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2917 {
2918 struct ftrace_func_entry *entry;
2919 int ret = 0;
2920
2921 entry = ftrace_lookup_ip(hash, rec->ip);
2922 if (not) {
2923 /* Do nothing if it doesn't exist */
2924 if (!entry)
2925 return 0;
2926
2927 free_hash_entry(hash, entry);
2928 } else {
2929 /* Do nothing if it exists */
2930 if (entry)
2931 return 0;
2932
2933 ret = add_hash_entry(hash, rec->ip);
2934 }
2935 return ret;
2936 }
2937
2938 static int
2939 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2940 char *regex, int len, int type)
2941 {
2942 char str[KSYM_SYMBOL_LEN];
2943 char *modname;
2944
2945 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2946
2947 if (mod) {
2948 /* module lookup requires matching the module */
2949 if (!modname || strcmp(modname, mod))
2950 return 0;
2951
2952 /* blank search means to match all funcs in the mod */
2953 if (!len)
2954 return 1;
2955 }
2956
2957 return ftrace_match(str, regex, len, type);
2958 }
2959
2960 static int
2961 match_records(struct ftrace_hash *hash, char *buff,
2962 int len, char *mod, int not)
2963 {
2964 unsigned search_len = 0;
2965 struct ftrace_page *pg;
2966 struct dyn_ftrace *rec;
2967 int type = MATCH_FULL;
2968 char *search = buff;
2969 int found = 0;
2970 int ret;
2971
2972 if (len) {
2973 type = filter_parse_regex(buff, len, &search, &not);
2974 search_len = strlen(search);
2975 }
2976
2977 mutex_lock(&ftrace_lock);
2978
2979 if (unlikely(ftrace_disabled))
2980 goto out_unlock;
2981
2982 do_for_each_ftrace_rec(pg, rec) {
2983 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2984 ret = enter_record(hash, rec, not);
2985 if (ret < 0) {
2986 found = ret;
2987 goto out_unlock;
2988 }
2989 found = 1;
2990 }
2991 } while_for_each_ftrace_rec();
2992 out_unlock:
2993 mutex_unlock(&ftrace_lock);
2994
2995 return found;
2996 }
2997
2998 static int
2999 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3000 {
3001 return match_records(hash, buff, len, NULL, 0);
3002 }
3003
3004 static int
3005 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
3006 {
3007 int not = 0;
3008
3009 /* blank or '*' mean the same */
3010 if (strcmp(buff, "*") == 0)
3011 buff[0] = 0;
3012
3013 /* handle the case of 'dont filter this module' */
3014 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
3015 buff[0] = 0;
3016 not = 1;
3017 }
3018
3019 return match_records(hash, buff, strlen(buff), mod, not);
3020 }
3021
3022 /*
3023 * We register the module command as a template to show others how
3024 * to register a command as well.
3025 */
3026
3027 static int
3028 ftrace_mod_callback(struct ftrace_hash *hash,
3029 char *func, char *cmd, char *param, int enable)
3030 {
3031 char *mod;
3032 int ret = -EINVAL;
3033
3034 /*
3035 * cmd == 'mod' because we only registered this func
3036 * for the 'mod' ftrace_func_command.
3037 * But if you register one func with multiple commands,
3038 * you can tell which command was used by the cmd
3039 * parameter.
3040 */
3041
3042 /* we must have a module name */
3043 if (!param)
3044 return ret;
3045
3046 mod = strsep(&param, ":");
3047 if (!strlen(mod))
3048 return ret;
3049
3050 ret = ftrace_match_module_records(hash, func, mod);
3051 if (!ret)
3052 ret = -EINVAL;
3053 if (ret < 0)
3054 return ret;
3055
3056 return 0;
3057 }
3058
3059 static struct ftrace_func_command ftrace_mod_cmd = {
3060 .name = "mod",
3061 .func = ftrace_mod_callback,
3062 };
3063
3064 static int __init ftrace_mod_cmd_init(void)
3065 {
3066 return register_ftrace_command(&ftrace_mod_cmd);
3067 }
3068 core_initcall(ftrace_mod_cmd_init);
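/*
 * Usage sketch (illustrative, not part of the original file): with the
 * "mod" command registered above, a write to set_ftrace_filter can limit
 * tracing to a single module, e.g. from user space:
 *
 *	echo ':mod:ext4' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'ext4_*:mod:ext4' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The text before the first ':' reaches ftrace_mod_callback() as @func,
 * and the text after "mod:" as @param.
 */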
3069
3070 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3071 struct ftrace_ops *op, struct pt_regs *pt_regs)
3072 {
3073 struct ftrace_func_probe *entry;
3074 struct hlist_head *hhd;
3075 unsigned long key;
3076
3077 key = hash_long(ip, FTRACE_HASH_BITS);
3078
3079 hhd = &ftrace_func_hash[key];
3080
3081 if (hlist_empty(hhd))
3082 return;
3083
3084 /*
3085 * Disable preemption for these calls to prevent an RCU grace
3086 * period from completing. This syncs the hash iteration with the
3087 * freeing of items on the hash. rcu_read_lock() is too dangerous here.
3088 */
3089 preempt_disable_notrace();
3090 hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3091 if (entry->ip == ip)
3092 entry->ops->func(ip, parent_ip, &entry->data);
3093 }
3094 preempt_enable_notrace();
3095 }
3096
3097 static struct ftrace_ops trace_probe_ops __read_mostly =
3098 {
3099 .func = function_trace_probe_call,
3100 .flags = FTRACE_OPS_FL_INITIALIZED,
3101 INIT_REGEX_LOCK(trace_probe_ops)
3102 };
3103
3104 static int ftrace_probe_registered;
3105
3106 static void __enable_ftrace_function_probe(void)
3107 {
3108 int ret;
3109 int i;
3110
3111 if (ftrace_probe_registered) {
3112 /* still need to update the function call sites */
3113 if (ftrace_enabled)
3114 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3115 return;
3116 }
3117
3118 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3119 struct hlist_head *hhd = &ftrace_func_hash[i];
3120 if (hhd->first)
3121 break;
3122 }
3123 /* Nothing registered? */
3124 if (i == FTRACE_FUNC_HASHSIZE)
3125 return;
3126
3127 ret = ftrace_startup(&trace_probe_ops, 0);
3128
3129 ftrace_probe_registered = 1;
3130 }
3131
3132 static void __disable_ftrace_function_probe(void)
3133 {
3134 int i;
3135
3136 if (!ftrace_probe_registered)
3137 return;
3138
3139 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3140 struct hlist_head *hhd = &ftrace_func_hash[i];
3141 if (hhd->first)
3142 return;
3143 }
3144
3145 /* no more funcs left */
3146 ftrace_shutdown(&trace_probe_ops, 0);
3147
3148 ftrace_probe_registered = 0;
3149 }
3150
3151
3152 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3153 {
3154 if (entry->ops->free)
3155 entry->ops->free(entry->ops, entry->ip, &entry->data);
3156 kfree(entry);
3157 }
3158
3159 int
3160 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3161 void *data)
3162 {
3163 struct ftrace_func_probe *entry;
3164 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3165 struct ftrace_hash *hash;
3166 struct ftrace_page *pg;
3167 struct dyn_ftrace *rec;
3168 int type, len, not;
3169 unsigned long key;
3170 int count = 0;
3171 char *search;
3172 int ret;
3173
3174 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3175 len = strlen(search);
3176
3177 /* we do not support '!' for function probes */
3178 if (WARN_ON(not))
3179 return -EINVAL;
3180
3181 mutex_lock(&trace_probe_ops.regex_lock);
3182
3183 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3184 if (!hash) {
3185 count = -ENOMEM;
3186 goto out;
3187 }
3188
3189 if (unlikely(ftrace_disabled)) {
3190 count = -ENODEV;
3191 goto out;
3192 }
3193
3194 mutex_lock(&ftrace_lock);
3195
3196 do_for_each_ftrace_rec(pg, rec) {
3197
3198 if (!ftrace_match_record(rec, NULL, search, len, type))
3199 continue;
3200
3201 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3202 if (!entry) {
3203 /* If we did not process any, then return error */
3204 if (!count)
3205 count = -ENOMEM;
3206 goto out_unlock;
3207 }
3208
3209 count++;
3210
3211 entry->data = data;
3212
3213 /*
3214 * The caller might want to do something special
3215 * for each function we find. We call the callback
3216 * to give the caller an opportunity to do so.
3217 */
3218 if (ops->init) {
3219 if (ops->init(ops, rec->ip, &entry->data) < 0) {
3220 /* caller does not like this func */
3221 kfree(entry);
3222 continue;
3223 }
3224 }
3225
3226 ret = enter_record(hash, rec, 0);
3227 if (ret < 0) {
3228 kfree(entry);
3229 count = ret;
3230 goto out_unlock;
3231 }
3232
3233 entry->ops = ops;
3234 entry->ip = rec->ip;
3235
3236 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3237 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3238
3239 } while_for_each_ftrace_rec();
3240
3241 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3242 if (ret < 0)
3243 count = ret;
3244
3245 __enable_ftrace_function_probe();
3246
3247 out_unlock:
3248 mutex_unlock(&ftrace_lock);
3249 out:
3250 mutex_unlock(&trace_probe_ops.regex_lock);
3251 free_ftrace_hash(hash);
3252
3253 return count;
3254 }
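/*
 * Minimal caller sketch (illustrative only; my_probe_func and
 * my_probe_ops are made-up names, not kernel symbols). A user of this
 * interface fills in a struct ftrace_probe_ops and attaches it to a glob:
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("schedule*", &my_probe_ops, NULL);
 *
 * my_probe_func() is then called from function_trace_probe_call() for
 * every hit. The optional .init, .free and .print callbacks are invoked
 * as seen above and in t_hash_show(); removal goes through
 * unregister_ftrace_function_probe() further below.
 */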
3255
3256 enum {
3257 PROBE_TEST_FUNC = 1,
3258 PROBE_TEST_DATA = 2
3259 };
3260
3261 static void
3262 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3263 void *data, int flags)
3264 {
3265 struct ftrace_func_entry *rec_entry;
3266 struct ftrace_func_probe *entry;
3267 struct ftrace_func_probe *p;
3268 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3269 struct list_head free_list;
3270 struct ftrace_hash *hash;
3271 struct hlist_node *tmp;
3272 char str[KSYM_SYMBOL_LEN];
3273 int type = MATCH_FULL;
3274 int i, len = 0;
3275 char *search;
3276
3277 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3278 glob = NULL;
3279 else if (glob) {
3280 int not;
3281
3282 type = filter_parse_regex(glob, strlen(glob), &search, &not);
3283 len = strlen(search);
3284
3285 /* we do not support '!' for function probes */
3286 if (WARN_ON(not))
3287 return;
3288 }
3289
3290 mutex_lock(&trace_probe_ops.regex_lock);
3291
3292 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3293 if (!hash)
3294 /* Hmm, should report this somehow */
3295 goto out_unlock;
3296
3297 INIT_LIST_HEAD(&free_list);
3298
3299 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3300 struct hlist_head *hhd = &ftrace_func_hash[i];
3301
3302 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3303
3304 /* break up if statements for readability */
3305 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3306 continue;
3307
3308 if ((flags & PROBE_TEST_DATA) && entry->data != data)
3309 continue;
3310
3311 /* do this last, since it is the most expensive */
3312 if (glob) {
3313 kallsyms_lookup(entry->ip, NULL, NULL,
3314 NULL, str);
3315 if (!ftrace_match(str, glob, len, type))
3316 continue;
3317 }
3318
3319 rec_entry = ftrace_lookup_ip(hash, entry->ip);
3320 /* It is possible more than one entry had this ip */
3321 if (rec_entry)
3322 free_hash_entry(hash, rec_entry);
3323
3324 hlist_del_rcu(&entry->node);
3325 list_add(&entry->free_list, &free_list);
3326 }
3327 }
3328 mutex_lock(&ftrace_lock);
3329 __disable_ftrace_function_probe();
3330 /*
3331 * Remove after the disable is called. Otherwise, if the last
3332 * probe is removed, a null hash means *all enabled*.
3333 */
3334 ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3335 synchronize_sched();
3336 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3337 list_del(&entry->free_list);
3338 ftrace_free_entry(entry);
3339 }
3340 mutex_unlock(&ftrace_lock);
3341
3342 out_unlock:
3343 mutex_unlock(&trace_probe_ops.regex_lock);
3344 free_ftrace_hash(hash);
3345 }
3346
3347 void
3348 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3349 void *data)
3350 {
3351 __unregister_ftrace_function_probe(glob, ops, data,
3352 PROBE_TEST_FUNC | PROBE_TEST_DATA);
3353 }
3354
3355 void
3356 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3357 {
3358 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3359 }
3360
3361 void unregister_ftrace_function_probe_all(char *glob)
3362 {
3363 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3364 }
3365
3366 static LIST_HEAD(ftrace_commands);
3367 static DEFINE_MUTEX(ftrace_cmd_mutex);
3368
3369 /*
3370 * Currently we only register ftrace commands from __init, so mark this
3371 * __init too.
3372 */
3373 __init int register_ftrace_command(struct ftrace_func_command *cmd)
3374 {
3375 struct ftrace_func_command *p;
3376 int ret = 0;
3377
3378 mutex_lock(&ftrace_cmd_mutex);
3379 list_for_each_entry(p, &ftrace_commands, list) {
3380 if (strcmp(cmd->name, p->name) == 0) {
3381 ret = -EBUSY;
3382 goto out_unlock;
3383 }
3384 }
3385 list_add(&cmd->list, &ftrace_commands);
3386 out_unlock:
3387 mutex_unlock(&ftrace_cmd_mutex);
3388
3389 return ret;
3390 }
3391
3392 /*
3393 * Currently we only unregister ftrace commands from __init, so mark
3394 * this __init too.
3395 */
3396 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3397 {
3398 struct ftrace_func_command *p, *n;
3399 int ret = -ENODEV;
3400
3401 mutex_lock(&ftrace_cmd_mutex);
3402 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3403 if (strcmp(cmd->name, p->name) == 0) {
3404 ret = 0;
3405 list_del_init(&p->list);
3406 goto out_unlock;
3407 }
3408 }
3409 out_unlock:
3410 mutex_unlock(&ftrace_cmd_mutex);
3411
3412 return ret;
3413 }
3414
3415 static int ftrace_process_regex(struct ftrace_hash *hash,
3416 char *buff, int len, int enable)
3417 {
3418 char *func, *command, *next = buff;
3419 struct ftrace_func_command *p;
3420 int ret = -EINVAL;
3421
3422 func = strsep(&next, ":");
3423
3424 if (!next) {
3425 ret = ftrace_match_records(hash, func, len);
3426 if (!ret)
3427 ret = -EINVAL;
3428 if (ret < 0)
3429 return ret;
3430 return 0;
3431 }
3432
3433 /* command found */
3434
3435 command = strsep(&next, ":");
3436
3437 mutex_lock(&ftrace_cmd_mutex);
3438 list_for_each_entry(p, &ftrace_commands, list) {
3439 if (strcmp(p->name, command) == 0) {
3440 ret = p->func(hash, func, command, next, enable);
3441 goto out_unlock;
3442 }
3443 }
3444 out_unlock:
3445 mutex_unlock(&ftrace_cmd_mutex);
3446
3447 return ret;
3448 }
3449
3450 static ssize_t
3451 ftrace_regex_write(struct file *file, const char __user *ubuf,
3452 size_t cnt, loff_t *ppos, int enable)
3453 {
3454 struct ftrace_iterator *iter;
3455 struct trace_parser *parser;
3456 ssize_t ret, read;
3457
3458 if (!cnt)
3459 return 0;
3460
3461 if (file->f_mode & FMODE_READ) {
3462 struct seq_file *m = file->private_data;
3463 iter = m->private;
3464 } else
3465 iter = file->private_data;
3466
3467 if (unlikely(ftrace_disabled))
3468 return -ENODEV;
3469
3470 /* iter->hash is a local copy, so we don't need regex_lock */
3471
3472 parser = &iter->parser;
3473 read = trace_get_user(parser, ubuf, cnt, ppos);
3474
3475 if (read >= 0 && trace_parser_loaded(parser) &&
3476 !trace_parser_cont(parser)) {
3477 ret = ftrace_process_regex(iter->hash, parser->buffer,
3478 parser->idx, enable);
3479 trace_parser_clear(parser);
3480 if (ret < 0)
3481 goto out;
3482 }
3483
3484 ret = read;
3485 out:
3486 return ret;
3487 }
3488
3489 ssize_t
3490 ftrace_filter_write(struct file *file, const char __user *ubuf,
3491 size_t cnt, loff_t *ppos)
3492 {
3493 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3494 }
3495
3496 ssize_t
3497 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3498 size_t cnt, loff_t *ppos)
3499 {
3500 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3501 }
3502
3503 static int
3504 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3505 {
3506 struct ftrace_func_entry *entry;
3507
3508 if (!ftrace_location(ip))
3509 return -EINVAL;
3510
3511 if (remove) {
3512 entry = ftrace_lookup_ip(hash, ip);
3513 if (!entry)
3514 return -ENOENT;
3515 free_hash_entry(hash, entry);
3516 return 0;
3517 }
3518
3519 return add_hash_entry(hash, ip);
3520 }
3521
3522 static void ftrace_ops_update_code(struct ftrace_ops *ops)
3523 {
3524 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3525 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3526 }
3527
3528 static int
3529 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3530 unsigned long ip, int remove, int reset, int enable)
3531 {
3532 struct ftrace_hash **orig_hash;
3533 struct ftrace_hash *hash;
3534 int ret;
3535
3536 /* All global ops use the global ops filters */
3537 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3538 ops = &global_ops;
3539
3540 if (unlikely(ftrace_disabled))
3541 return -ENODEV;
3542
3543 mutex_lock(&ops->regex_lock);
3544
3545 if (enable)
3546 orig_hash = &ops->filter_hash;
3547 else
3548 orig_hash = &ops->notrace_hash;
3549
3550 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3551 if (!hash) {
3552 ret = -ENOMEM;
3553 goto out_regex_unlock;
3554 }
3555
3556 if (reset)
3557 ftrace_filter_reset(hash);
3558 if (buf && !ftrace_match_records(hash, buf, len)) {
3559 ret = -EINVAL;
3560 goto out_regex_unlock;
3561 }
3562 if (ip) {
3563 ret = ftrace_match_addr(hash, ip, remove);
3564 if (ret < 0)
3565 goto out_regex_unlock;
3566 }
3567
3568 mutex_lock(&ftrace_lock);
3569 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3570 if (!ret)
3571 ftrace_ops_update_code(ops);
3572
3573 mutex_unlock(&ftrace_lock);
3574
3575 out_regex_unlock:
3576 mutex_unlock(&ops->regex_lock);
3577
3578 free_ftrace_hash(hash);
3579 return ret;
3580 }
3581
3582 static int
3583 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3584 int reset, int enable)
3585 {
3586 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3587 }
3588
3589 /**
3590 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3591 * @ops - the ops to set the filter with
3592 * @ip - the address to add to or remove from the filter.
3593 * @remove - non zero to remove the ip from the filter
3594 * @reset - non zero to reset all filters before applying this filter.
3595 *
3596 * Filters denote which functions should be enabled when tracing is enabled.
3597 * If @ip is NULL, it fails to update the filter.
3598 */
3599 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3600 int remove, int reset)
3601 {
3602 ftrace_ops_init(ops);
3603 return ftrace_set_addr(ops, ip, remove, reset, 1);
3604 }
3605 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
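/*
 * Example use (a hedged sketch, not from the original file; my_ops is a
 * hypothetical, already initialized ftrace_ops): filter on one address
 * that ftrace_location() recognizes, which on many configurations is
 * simply the symbol address, then register the ops:
 *
 *	int ret;
 *	unsigned long ip = kallsyms_lookup_name("do_fork");
 *
 *	ret = ftrace_set_filter_ip(&my_ops, ip, 0, 0);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 *
 * This is the kind of use kprobe-like callers make of the interface,
 * hooking exactly one function without going through a glob string.
 */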
3606
3607 static int
3608 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3609 int reset, int enable)
3610 {
3611 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3612 }
3613
3614 /**
3615 * ftrace_set_filter - set a function to filter on in ftrace
3616 * @ops - the ops to set the filter with
3617 * @buf - the string that holds the function filter text.
3618 * @len - the length of the string.
3619 * @reset - non zero to reset all filters before applying this filter.
3620 *
3621 * Filters denote which functions should be enabled when tracing is enabled.
3622 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3623 */
3624 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3625 int len, int reset)
3626 {
3627 ftrace_ops_init(ops);
3628 return ftrace_set_regex(ops, buf, len, reset, 1);
3629 }
3630 EXPORT_SYMBOL_GPL(ftrace_set_filter);
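/*
 * Example use (a hedged sketch, not part of the original file; my_ops and
 * my_trace_func are hypothetical names):
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
 *				  struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "do_sys_open", strlen("do_sys_open"), 1);
 *	register_ftrace_function(&my_ops);
 *
 * Setting the filter before registering keeps the callback from ever being
 * attached to functions outside the list; the filter may also be changed
 * on a live ops, in which case ftrace_ops_update_code() picks it up.
 */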
3631
3632 /**
3633 * ftrace_set_notrace - set a function to not trace in ftrace
3634 * @ops - the ops to set the notrace filter with
3635 * @buf - the string that holds the function notrace text.
3636 * @len - the length of the string.
3637 * @reset - non zero to reset all filters before applying this filter.
3638 *
3639 * Notrace Filters denote which functions should not be enabled when tracing
3640 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3641 * for tracing.
3642 */
3643 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3644 int len, int reset)
3645 {
3646 ftrace_ops_init(ops);
3647 return ftrace_set_regex(ops, buf, len, reset, 0);
3648 }
3649 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3650 /**
3651 * ftrace_set_global_filter - set a function to filter on with global tracers
3653 * @buf - the string that holds the function filter text.
3654 * @len - the length of the string.
3655 * @reset - non zero to reset all filters before applying this filter.
3656 *
3657 * Filters denote which functions should be enabled when tracing is enabled.
3658 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3659 */
3660 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3661 {
3662 ftrace_set_regex(&global_ops, buf, len, reset, 1);
3663 }
3664 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3665
3666 /**
3667 * ftrace_set_global_notrace - set a function to not trace with global tracers
3669 * @buf - the string that holds the function notrace text.
3670 * @len - the length of the string.
3671 * @reset - non zero to reset all filters before applying this filter.
3672 *
3673 * Notrace Filters denote which functions should not be enabled when tracing
3674 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3675 * for tracing.
3676 */
3677 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3678 {
3679 ftrace_set_regex(&global_ops, buf, len, reset, 0);
3680 }
3681 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3682
3683 /*
3684 * command line interface to allow users to set filters on boot up.
3685 */
3686 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3687 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3688 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3689
3690 /* Used by the function selftest to skip the test if a filter is set */
3691 bool ftrace_filter_param __initdata;
3692
3693 static int __init set_ftrace_notrace(char *str)
3694 {
3695 ftrace_filter_param = true;
3696 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3697 return 1;
3698 }
3699 __setup("ftrace_notrace=", set_ftrace_notrace);
3700
3701 static int __init set_ftrace_filter(char *str)
3702 {
3703 ftrace_filter_param = true;
3704 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3705 return 1;
3706 }
3707 __setup("ftrace_filter=", set_ftrace_filter);
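/*
 * Boot-time example (illustrative): the two parameters above take the same
 * comma separated globs that set_ftrace_filter and set_ftrace_notrace
 * accept at run time, e.g. on the kernel command line:
 *
 *	ftrace_filter=kmem_cache_alloc,kmem_cache_free ftrace_notrace=*lock*
 *
 * The strings are only buffered here; they are applied later from
 * set_ftrace_early_filters(), which ftrace_init() calls.
 */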
3708
3709 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3710 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3711 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
3712
3713 static int __init set_graph_function(char *str)
3714 {
3715 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3716 return 1;
3717 }
3718 __setup("ftrace_graph_filter=", set_graph_function);
3719
3720 static void __init set_ftrace_early_graph(char *buf)
3721 {
3722 int ret;
3723 char *func;
3724
3725 while (buf) {
3726 func = strsep(&buf, ",");
3727 /* we allow only one expression at a time */
3728 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3729 FTRACE_GRAPH_MAX_FUNCS, func);
3730 if (ret)
3731 printk(KERN_DEBUG "ftrace: function %s not "
3732 "traceable\n", func);
3733 }
3734 }
3735 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3736
3737 void __init
3738 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3739 {
3740 char *func;
3741
3742 ftrace_ops_init(ops);
3743
3744 while (buf) {
3745 func = strsep(&buf, ",");
3746 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3747 }
3748 }
3749
3750 static void __init set_ftrace_early_filters(void)
3751 {
3752 if (ftrace_filter_buf[0])
3753 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3754 if (ftrace_notrace_buf[0])
3755 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3756 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3757 if (ftrace_graph_buf[0])
3758 set_ftrace_early_graph(ftrace_graph_buf);
3759 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3760 }
3761
3762 int ftrace_regex_release(struct inode *inode, struct file *file)
3763 {
3764 struct seq_file *m = (struct seq_file *)file->private_data;
3765 struct ftrace_iterator *iter;
3766 struct ftrace_hash **orig_hash;
3767 struct trace_parser *parser;
3768 int filter_hash;
3769 int ret;
3770
3771 if (file->f_mode & FMODE_READ) {
3772 iter = m->private;
3773 seq_release(inode, file);
3774 } else
3775 iter = file->private_data;
3776
3777 parser = &iter->parser;
3778 if (trace_parser_loaded(parser)) {
3779 parser->buffer[parser->idx] = 0;
3780 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3781 }
3782
3783 trace_parser_put(parser);
3784
3785 mutex_lock(&iter->ops->regex_lock);
3786
3787 if (file->f_mode & FMODE_WRITE) {
3788 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3789
3790 if (filter_hash)
3791 orig_hash = &iter->ops->filter_hash;
3792 else
3793 orig_hash = &iter->ops->notrace_hash;
3794
3795 mutex_lock(&ftrace_lock);
3796 ret = ftrace_hash_move(iter->ops, filter_hash,
3797 orig_hash, iter->hash);
3798 if (!ret)
3799 ftrace_ops_update_code(iter->ops);
3800
3801 mutex_unlock(&ftrace_lock);
3802 }
3803
3804 mutex_unlock(&iter->ops->regex_lock);
3805 free_ftrace_hash(iter->hash);
3806 kfree(iter);
3807
3808 return 0;
3809 }
3810
3811 static const struct file_operations ftrace_avail_fops = {
3812 .open = ftrace_avail_open,
3813 .read = seq_read,
3814 .llseek = seq_lseek,
3815 .release = seq_release_private,
3816 };
3817
3818 static const struct file_operations ftrace_enabled_fops = {
3819 .open = ftrace_enabled_open,
3820 .read = seq_read,
3821 .llseek = seq_lseek,
3822 .release = seq_release_private,
3823 };
3824
3825 static const struct file_operations ftrace_filter_fops = {
3826 .open = ftrace_filter_open,
3827 .read = seq_read,
3828 .write = ftrace_filter_write,
3829 .llseek = tracing_lseek,
3830 .release = ftrace_regex_release,
3831 };
3832
3833 static const struct file_operations ftrace_notrace_fops = {
3834 .open = ftrace_notrace_open,
3835 .read = seq_read,
3836 .write = ftrace_notrace_write,
3837 .llseek = tracing_lseek,
3838 .release = ftrace_regex_release,
3839 };
3840
3841 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3842
3843 static DEFINE_MUTEX(graph_lock);
3844
3845 int ftrace_graph_count;
3846 int ftrace_graph_notrace_count;
3847 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3848 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3849
3850 struct ftrace_graph_data {
3851 unsigned long *table;
3852 size_t size;
3853 int *count;
3854 const struct seq_operations *seq_ops;
3855 };
3856
3857 static void *
3858 __g_next(struct seq_file *m, loff_t *pos)
3859 {
3860 struct ftrace_graph_data *fgd = m->private;
3861
3862 if (*pos >= *fgd->count)
3863 return NULL;
3864 return &fgd->table[*pos];
3865 }
3866
3867 static void *
3868 g_next(struct seq_file *m, void *v, loff_t *pos)
3869 {
3870 (*pos)++;
3871 return __g_next(m, pos);
3872 }
3873
3874 static void *g_start(struct seq_file *m, loff_t *pos)
3875 {
3876 struct ftrace_graph_data *fgd = m->private;
3877
3878 mutex_lock(&graph_lock);
3879
3880 /* Nothing is set, tell g_show to print that all functions are enabled */
3881 if (!*fgd->count && !*pos)
3882 return (void *)1;
3883
3884 return __g_next(m, pos);
3885 }
3886
3887 static void g_stop(struct seq_file *m, void *p)
3888 {
3889 mutex_unlock(&graph_lock);
3890 }
3891
3892 static int g_show(struct seq_file *m, void *v)
3893 {
3894 unsigned long *ptr = v;
3895
3896 if (!ptr)
3897 return 0;
3898
3899 if (ptr == (unsigned long *)1) {
3900 seq_printf(m, "#### all functions enabled ####\n");
3901 return 0;
3902 }
3903
3904 seq_printf(m, "%ps\n", (void *)*ptr);
3905
3906 return 0;
3907 }
3908
3909 static const struct seq_operations ftrace_graph_seq_ops = {
3910 .start = g_start,
3911 .next = g_next,
3912 .stop = g_stop,
3913 .show = g_show,
3914 };
3915
3916 static int
3917 __ftrace_graph_open(struct inode *inode, struct file *file,
3918 struct ftrace_graph_data *fgd)
3919 {
3920 int ret = 0;
3921
3922 mutex_lock(&graph_lock);
3923 if ((file->f_mode & FMODE_WRITE) &&
3924 (file->f_flags & O_TRUNC)) {
3925 *fgd->count = 0;
3926 memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
3927 }
3928 mutex_unlock(&graph_lock);
3929
3930 if (file->f_mode & FMODE_READ) {
3931 ret = seq_open(file, fgd->seq_ops);
3932 if (!ret) {
3933 struct seq_file *m = file->private_data;
3934 m->private = fgd;
3935 }
3936 } else
3937 file->private_data = fgd;
3938
3939 return ret;
3940 }
3941
3942 static int
3943 ftrace_graph_open(struct inode *inode, struct file *file)
3944 {
3945 struct ftrace_graph_data *fgd;
3946
3947 if (unlikely(ftrace_disabled))
3948 return -ENODEV;
3949
3950 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
3951 if (fgd == NULL)
3952 return -ENOMEM;
3953
3954 fgd->table = ftrace_graph_funcs;
3955 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
3956 fgd->count = &ftrace_graph_count;
3957 fgd->seq_ops = &ftrace_graph_seq_ops;
3958
3959 return __ftrace_graph_open(inode, file, fgd);
3960 }
3961
3962 static int
3963 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
3964 {
3965 struct ftrace_graph_data *fgd;
3966
3967 if (unlikely(ftrace_disabled))
3968 return -ENODEV;
3969
3970 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
3971 if (fgd == NULL)
3972 return -ENOMEM;
3973
3974 fgd->table = ftrace_graph_notrace_funcs;
3975 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
3976 fgd->count = &ftrace_graph_notrace_count;
3977 fgd->seq_ops = &ftrace_graph_seq_ops;
3978
3979 return __ftrace_graph_open(inode, file, fgd);
3980 }
3981
3982 static int
3983 ftrace_graph_release(struct inode *inode, struct file *file)
3984 {
3985 if (file->f_mode & FMODE_READ) {
3986 struct seq_file *m = file->private_data;
3987
3988 kfree(m->private);
3989 seq_release(inode, file);
3990 } else {
3991 kfree(file->private_data);
3992 }
3993
3994 return 0;
3995 }
3996
3997 static int
3998 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
3999 {
4000 struct dyn_ftrace *rec;
4001 struct ftrace_page *pg;
4002 int search_len;
4003 int fail = 1;
4004 int type, not;
4005 char *search;
4006 bool exists;
4007 int i;
4008
4009 /* decode regex */
4010 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
4011 if (!not && *idx >= size)
4012 return -EBUSY;
4013
4014 search_len = strlen(search);
4015
4016 mutex_lock(&ftrace_lock);
4017
4018 if (unlikely(ftrace_disabled)) {
4019 mutex_unlock(&ftrace_lock);
4020 return -ENODEV;
4021 }
4022
4023 do_for_each_ftrace_rec(pg, rec) {
4024
4025 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
4026 /* if it is in the array */
4027 exists = false;
4028 for (i = 0; i < *idx; i++) {
4029 if (array[i] == rec->ip) {
4030 exists = true;
4031 break;
4032 }
4033 }
4034
4035 if (!not) {
4036 fail = 0;
4037 if (!exists) {
4038 array[(*idx)++] = rec->ip;
4039 if (*idx >= size)
4040 goto out;
4041 }
4042 } else {
4043 if (exists) {
4044 array[i] = array[--(*idx)];
4045 array[*idx] = 0;
4046 fail = 0;
4047 }
4048 }
4049 }
4050 } while_for_each_ftrace_rec();
4051 out:
4052 mutex_unlock(&ftrace_lock);
4053
4054 if (fail)
4055 return -EINVAL;
4056
4057 return 0;
4058 }
4059
4060 static ssize_t
4061 ftrace_graph_write(struct file *file, const char __user *ubuf,
4062 size_t cnt, loff_t *ppos)
4063 {
4064 struct trace_parser parser;
4065 ssize_t read, ret = 0;
4066 struct ftrace_graph_data *fgd = file->private_data;
4067
4068 if (!cnt)
4069 return 0;
4070
4071 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4072 return -ENOMEM;
4073
4074 read = trace_get_user(&parser, ubuf, cnt, ppos);
4075
4076 if (read >= 0 && trace_parser_loaded((&parser))) {
4077 parser.buffer[parser.idx] = 0;
4078
4079 mutex_lock(&graph_lock);
4080
4081 /* we allow only one expression at a time */
4082 ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4083 parser.buffer);
4084
4085 mutex_unlock(&graph_lock);
4086 }
4087
4088 if (!ret)
4089 ret = read;
4090
4091 trace_parser_put(&parser);
4092
4093 return ret;
4094 }
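/*
 * Interface note (added for illustration): this write handler backs both
 * set_graph_function and set_graph_notrace, so e.g. from user space
 *
 *	echo do_sys_open > /sys/kernel/debug/tracing/set_graph_function
 *
 * ends up here with fgd->table pointing at ftrace_graph_funcs, while the
 * same write to set_graph_notrace fills ftrace_graph_notrace_funcs.
 * Only one expression per write is accepted.
 */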
4095
4096 static const struct file_operations ftrace_graph_fops = {
4097 .open = ftrace_graph_open,
4098 .read = seq_read,
4099 .write = ftrace_graph_write,
4100 .llseek = tracing_lseek,
4101 .release = ftrace_graph_release,
4102 };
4103
4104 static const struct file_operations ftrace_graph_notrace_fops = {
4105 .open = ftrace_graph_notrace_open,
4106 .read = seq_read,
4107 .write = ftrace_graph_write,
4108 .llseek = tracing_lseek,
4109 .release = ftrace_graph_release,
4110 };
4111 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4112
4113 void ftrace_create_filter_files(struct ftrace_ops *ops,
4114 struct dentry *parent)
4115 {
4116
4117 trace_create_file("set_ftrace_filter", 0644, parent,
4118 ops, &ftrace_filter_fops);
4119
4120 trace_create_file("set_ftrace_notrace", 0644, parent,
4121 ops, &ftrace_notrace_fops);
4122 }
4123
4124 /*
4125 * The name "destroy_filter_files" is really a misnomer. Although
4126 * in the future it may actually delete the files, for now it is
4127 * really intended to make sure the ops passed in are disabled
4128 * and that when this function returns, the caller is free to
4129 * free the ops.
4130 *
4131 * The "destroy" name is only to match the "create" name that this
4132 * should be paired with.
4133 */
4134 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4135 {
4136 mutex_lock(&ftrace_lock);
4137 if (ops->flags & FTRACE_OPS_FL_ENABLED)
4138 ftrace_shutdown(ops, 0);
4139 ops->flags |= FTRACE_OPS_FL_DELETED;
4140 mutex_unlock(&ftrace_lock);
4141 }
4142
4143 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
4144 {
4145
4146 trace_create_file("available_filter_functions", 0444,
4147 d_tracer, NULL, &ftrace_avail_fops);
4148
4149 trace_create_file("enabled_functions", 0444,
4150 d_tracer, NULL, &ftrace_enabled_fops);
4151
4152 ftrace_create_filter_files(&global_ops, d_tracer);
4153
4154 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4155 trace_create_file("set_graph_function", 0444, d_tracer,
4156 NULL,
4157 &ftrace_graph_fops);
4158 trace_create_file("set_graph_notrace", 0444, d_tracer,
4159 NULL,
4160 &ftrace_graph_notrace_fops);
4161 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4162
4163 return 0;
4164 }
4165
4166 static int ftrace_cmp_ips(const void *a, const void *b)
4167 {
4168 const unsigned long *ipa = a;
4169 const unsigned long *ipb = b;
4170
4171 if (*ipa > *ipb)
4172 return 1;
4173 if (*ipa < *ipb)
4174 return -1;
4175 return 0;
4176 }
4177
4178 static void ftrace_swap_ips(void *a, void *b, int size)
4179 {
4180 unsigned long *ipa = a;
4181 unsigned long *ipb = b;
4182 unsigned long t;
4183
4184 t = *ipa;
4185 *ipa = *ipb;
4186 *ipb = t;
4187 }
4188
4189 static int ftrace_process_locs(struct module *mod,
4190 unsigned long *start,
4191 unsigned long *end)
4192 {
4193 struct ftrace_page *start_pg;
4194 struct ftrace_page *pg;
4195 struct dyn_ftrace *rec;
4196 unsigned long count;
4197 unsigned long *p;
4198 unsigned long addr;
4199 unsigned long flags = 0; /* Shut up gcc */
4200 int ret = -ENOMEM;
4201
4202 count = end - start;
4203
4204 if (!count)
4205 return 0;
4206
4207 sort(start, count, sizeof(*start),
4208 ftrace_cmp_ips, ftrace_swap_ips);
4209
4210 start_pg = ftrace_allocate_pages(count);
4211 if (!start_pg)
4212 return -ENOMEM;
4213
4214 mutex_lock(&ftrace_lock);
4215
4216 /*
4217 * The core kernel and each module need their own pages, as
4218 * modules will free them when they are removed.
4219 * Force a new page to be allocated for modules.
4220 */
4221 if (!mod) {
4222 WARN_ON(ftrace_pages || ftrace_pages_start);
4223 /* First initialization */
4224 ftrace_pages = ftrace_pages_start = start_pg;
4225 } else {
4226 if (!ftrace_pages)
4227 goto out;
4228
4229 if (WARN_ON(ftrace_pages->next)) {
4230 /* Hmm, we have free pages? */
4231 while (ftrace_pages->next)
4232 ftrace_pages = ftrace_pages->next;
4233 }
4234
4235 ftrace_pages->next = start_pg;
4236 }
4237
4238 p = start;
4239 pg = start_pg;
4240 while (p < end) {
4241 addr = ftrace_call_adjust(*p++);
4242 /*
4243 * Some architecture linkers will pad between
4244 * the different mcount_loc sections of different
4245 * object files to satisfy alignments.
4246 * Skip any NULL pointers.
4247 */
4248 if (!addr)
4249 continue;
4250
4251 if (pg->index == pg->size) {
4252 /* We should have allocated enough */
4253 if (WARN_ON(!pg->next))
4254 break;
4255 pg = pg->next;
4256 }
4257
4258 rec = &pg->records[pg->index++];
4259 rec->ip = addr;
4260 }
4261
4262 /* We should have used all pages */
4263 WARN_ON(pg->next);
4264
4265 /* Assign the last page to ftrace_pages */
4266 ftrace_pages = pg;
4267
4268 /*
4269 * We only need to disable interrupts on start up
4270 * because we are modifying code that an interrupt
4271 * may execute, and the modification is not atomic.
4272 * But for modules, nothing runs the code we modify
4273 * until we are finished with it, and there's no
4274 * reason to cause large interrupt latencies while we do it.
4275 */
4276 if (!mod)
4277 local_irq_save(flags);
4278 ftrace_update_code(mod, start_pg);
4279 if (!mod)
4280 local_irq_restore(flags);
4281 ret = 0;
4282 out:
4283 mutex_unlock(&ftrace_lock);
4284
4285 return ret;
4286 }
4287
4288 #ifdef CONFIG_MODULES
4289
4290 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4291
4292 void ftrace_release_mod(struct module *mod)
4293 {
4294 struct dyn_ftrace *rec;
4295 struct ftrace_page **last_pg;
4296 struct ftrace_page *pg;
4297 int order;
4298
4299 mutex_lock(&ftrace_lock);
4300
4301 if (ftrace_disabled)
4302 goto out_unlock;
4303
4304 /*
4305 * Each module has its own ftrace_pages, remove
4306 * them from the list.
4307 */
4308 last_pg = &ftrace_pages_start;
4309 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4310 rec = &pg->records[0];
4311 if (within_module_core(rec->ip, mod)) {
4312 /*
4313 * As core pages are first, the first
4314 * page should never be a module page.
4315 */
4316 if (WARN_ON(pg == ftrace_pages_start))
4317 goto out_unlock;
4318
4319 /* Check if we are deleting the last page */
4320 if (pg == ftrace_pages)
4321 ftrace_pages = next_to_ftrace_page(last_pg);
4322
4323 *last_pg = pg->next;
4324 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4325 free_pages((unsigned long)pg->records, order);
4326 kfree(pg);
4327 } else
4328 last_pg = &pg->next;
4329 }
4330 out_unlock:
4331 mutex_unlock(&ftrace_lock);
4332 }
4333
4334 static void ftrace_init_module(struct module *mod,
4335 unsigned long *start, unsigned long *end)
4336 {
4337 if (ftrace_disabled || start == end)
4338 return;
4339 ftrace_process_locs(mod, start, end);
4340 }
4341
4342 static int ftrace_module_notify_enter(struct notifier_block *self,
4343 unsigned long val, void *data)
4344 {
4345 struct module *mod = data;
4346
4347 if (val == MODULE_STATE_COMING)
4348 ftrace_init_module(mod, mod->ftrace_callsites,
4349 mod->ftrace_callsites +
4350 mod->num_ftrace_callsites);
4351 return 0;
4352 }
4353
4354 static int ftrace_module_notify_exit(struct notifier_block *self,
4355 unsigned long val, void *data)
4356 {
4357 struct module *mod = data;
4358
4359 if (val == MODULE_STATE_GOING)
4360 ftrace_release_mod(mod);
4361
4362 return 0;
4363 }
4364 #else
4365 static int ftrace_module_notify_enter(struct notifier_block *self,
4366 unsigned long val, void *data)
4367 {
4368 return 0;
4369 }
4370 static int ftrace_module_notify_exit(struct notifier_block *self,
4371 unsigned long val, void *data)
4372 {
4373 return 0;
4374 }
4375 #endif /* CONFIG_MODULES */
4376
4377 struct notifier_block ftrace_module_enter_nb = {
4378 .notifier_call = ftrace_module_notify_enter,
4379 .priority = INT_MAX, /* Run before anything that can use kprobes */
4380 };
4381
4382 struct notifier_block ftrace_module_exit_nb = {
4383 .notifier_call = ftrace_module_notify_exit,
4384 .priority = INT_MIN, /* Run after anything that can remove kprobes */
4385 };
4386
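/*
 * Boot-time initialization: let the architecture prepare for code
 * patching, size the dyn_ftrace tables from the __mcount_loc section,
 * convert the core kernel call sites, and register the module notifiers
 * so modules get the same treatment as they are loaded and unloaded.
 */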
4387 void __init ftrace_init(void)
4388 {
4389 extern unsigned long __start_mcount_loc[];
4390 extern unsigned long __stop_mcount_loc[];
4391 unsigned long count, addr, flags;
4392 int ret;
4393
4394 /* Keep the ftrace pointer to the stub */
4395 addr = (unsigned long)ftrace_stub;
4396
4397 local_irq_save(flags);
4398 ftrace_dyn_arch_init(&addr);
4399 local_irq_restore(flags);
4400
4401 /* ftrace_dyn_arch_init places the return code in addr */
4402 if (addr)
4403 goto failed;
4404
4405 count = __stop_mcount_loc - __start_mcount_loc;
4406
4407 ret = ftrace_dyn_table_alloc(count);
4408 if (ret)
4409 goto failed;
4410
4411 last_ftrace_enabled = ftrace_enabled = 1;
4412
4413 ret = ftrace_process_locs(NULL,
4414 __start_mcount_loc,
4415 __stop_mcount_loc);
4416
4417 ret = register_module_notifier(&ftrace_module_enter_nb);
4418 if (ret)
4419 pr_warning("Failed to register trace ftrace module enter notifier\n");
4420
4421 ret = register_module_notifier(&ftrace_module_exit_nb);
4422 if (ret)
4423 		pr_warning("Failed to register ftrace module exit notifier\n");
4424
4425 set_ftrace_early_filters();
4426
4427 return;
4428 failed:
4429 ftrace_disabled = 1;
4430 }
4431
4432 #else
4433
4434 static struct ftrace_ops global_ops = {
4435 .func = ftrace_stub,
4436 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4437 INIT_REGEX_LOCK(global_ops)
4438 };
4439
4440 static int __init ftrace_nodyn_init(void)
4441 {
4442 ftrace_enabled = 1;
4443 return 0;
4444 }
4445 core_initcall(ftrace_nodyn_init);
4446
4447 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4448 static inline void ftrace_startup_enable(int command) { }
4449 /* Keep as macros so we do not need to define the commands */
4450 # define ftrace_startup(ops, command) \
4451 ({ \
4452 int ___ret = __register_ftrace_function(ops); \
4453 if (!___ret) \
4454 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4455 ___ret; \
4456 })
4457 # define ftrace_shutdown(ops, command) \
4458 ({ \
4459 int ___ret = __unregister_ftrace_function(ops); \
4460 if (!___ret) \
4461 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
4462 ___ret; \
4463 })
4464
4465 # define ftrace_startup_sysctl() do { } while (0)
4466 # define ftrace_shutdown_sysctl() do { } while (0)
4467
4468 static inline int
4469 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4470 {
4471 return 1;
4472 }
4473
4474 #endif /* CONFIG_DYNAMIC_FTRACE */
4475
4476 static void
4477 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4478 struct ftrace_ops *op, struct pt_regs *regs)
4479 {
4480 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4481 return;
4482
4483 /*
4484 * Some of the ops may be dynamically allocated,
4485 * they must be freed after a synchronize_sched().
4486 */
4487 preempt_disable_notrace();
4488 trace_recursion_set(TRACE_CONTROL_BIT);
4489
4490 /*
4491 	 * Control funcs (perf) use RCU. Only trace if
4492 * RCU is currently active.
4493 */
4494 if (!rcu_is_watching())
4495 goto out;
4496
4497 do_for_each_ftrace_op(op, ftrace_control_list) {
4498 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4499 !ftrace_function_local_disabled(op) &&
4500 ftrace_ops_test(op, ip, regs))
4501 op->func(ip, parent_ip, op, regs);
4502 } while_for_each_ftrace_op(op);
4503 out:
4504 trace_recursion_clear(TRACE_CONTROL_BIT);
4505 preempt_enable_notrace();
4506 }
4507
4508 static struct ftrace_ops control_ops = {
4509 .func = ftrace_ops_control_func,
4510 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4511 INIT_REGEX_LOCK(control_ops)
4512 };
4513
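/*
 * Walk every ftrace_ops on ftrace_ops_list and call its handler for any
 * ops whose filters accept @ip.  This is the generic path used when the
 * trace function cannot be pointed directly at a single ops handler.
 */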
4514 static inline void
4515 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4516 struct ftrace_ops *ignored, struct pt_regs *regs)
4517 {
4518 struct ftrace_ops *op;
4519 int bit;
4520
4521 if (function_trace_stop)
4522 return;
4523
4524 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4525 if (bit < 0)
4526 return;
4527
4528 /*
4529 * Some of the ops may be dynamically allocated,
4530 * they must be freed after a synchronize_sched().
4531 */
4532 preempt_disable_notrace();
4533 do_for_each_ftrace_op(op, ftrace_ops_list) {
4534 if (ftrace_ops_test(op, ip, regs))
4535 op->func(ip, parent_ip, op, regs);
4536 } while_for_each_ftrace_op(op);
4537 preempt_enable_notrace();
4538 trace_clear_recursion(bit);
4539 }
4540
4541 /*
4542 * Some archs only support passing ip and parent_ip. Even though
4543  * the list function ignores the op parameter, we do not want to rely
4544  * on undefined C behaviour by calling a function with fewer arguments
4545  * than its prototype declares.
4546  * Archs are expected to support both regs and ftrace_ops at the same time.
4547  * If they support ftrace_ops, it is assumed they support regs.
4548  * If callbacks want to use regs, they must either check for regs
4549  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4550  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4551  * An architecture can pass partial regs with ftrace_ops and still
4552  * set ARCH_SUPPORTS_FTRACE_OPS.
4553 */
4554 #if ARCH_SUPPORTS_FTRACE_OPS
4555 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4556 struct ftrace_ops *op, struct pt_regs *regs)
4557 {
4558 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4559 }
4560 #else
4561 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4562 {
4563 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4564 }
4565 #endif
4566
4567 static void clear_ftrace_swapper(void)
4568 {
4569 struct task_struct *p;
4570 int cpu;
4571
4572 get_online_cpus();
4573 for_each_online_cpu(cpu) {
4574 p = idle_task(cpu);
4575 clear_tsk_trace_trace(p);
4576 }
4577 put_online_cpus();
4578 }
4579
4580 static void set_ftrace_swapper(void)
4581 {
4582 struct task_struct *p;
4583 int cpu;
4584
4585 get_online_cpus();
4586 for_each_online_cpu(cpu) {
4587 p = idle_task(cpu);
4588 set_tsk_trace_trace(p);
4589 }
4590 put_online_cpus();
4591 }
4592
4593 static void clear_ftrace_pid(struct pid *pid)
4594 {
4595 struct task_struct *p;
4596
4597 rcu_read_lock();
4598 do_each_pid_task(pid, PIDTYPE_PID, p) {
4599 clear_tsk_trace_trace(p);
4600 } while_each_pid_task(pid, PIDTYPE_PID, p);
4601 rcu_read_unlock();
4602
4603 put_pid(pid);
4604 }
4605
4606 static void set_ftrace_pid(struct pid *pid)
4607 {
4608 struct task_struct *p;
4609
4610 rcu_read_lock();
4611 do_each_pid_task(pid, PIDTYPE_PID, p) {
4612 set_tsk_trace_trace(p);
4613 } while_each_pid_task(pid, PIDTYPE_PID, p);
4614 rcu_read_unlock();
4615 }
4616
4617 static void clear_ftrace_pid_task(struct pid *pid)
4618 {
4619 if (pid == ftrace_swapper_pid)
4620 clear_ftrace_swapper();
4621 else
4622 clear_ftrace_pid(pid);
4623 }
4624
4625 static void set_ftrace_pid_task(struct pid *pid)
4626 {
4627 if (pid == ftrace_swapper_pid)
4628 set_ftrace_swapper();
4629 else
4630 set_ftrace_pid(pid);
4631 }
4632
4633 static int ftrace_pid_add(int p)
4634 {
4635 struct pid *pid;
4636 struct ftrace_pid *fpid;
4637 int ret = -EINVAL;
4638
4639 mutex_lock(&ftrace_lock);
4640
4641 if (!p)
4642 pid = ftrace_swapper_pid;
4643 else
4644 pid = find_get_pid(p);
4645
4646 if (!pid)
4647 goto out;
4648
4649 ret = 0;
4650
4651 list_for_each_entry(fpid, &ftrace_pids, list)
4652 if (fpid->pid == pid)
4653 goto out_put;
4654
4655 ret = -ENOMEM;
4656
4657 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4658 if (!fpid)
4659 goto out_put;
4660
4661 list_add(&fpid->list, &ftrace_pids);
4662 fpid->pid = pid;
4663
4664 set_ftrace_pid_task(pid);
4665
4666 ftrace_update_pid_func();
4667 ftrace_startup_enable(0);
4668
4669 mutex_unlock(&ftrace_lock);
4670 return 0;
4671
4672 out_put:
4673 if (pid != ftrace_swapper_pid)
4674 put_pid(pid);
4675
4676 out:
4677 mutex_unlock(&ftrace_lock);
4678 return ret;
4679 }
4680
4681 static void ftrace_pid_reset(void)
4682 {
4683 struct ftrace_pid *fpid, *safe;
4684
4685 mutex_lock(&ftrace_lock);
4686 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4687 struct pid *pid = fpid->pid;
4688
4689 clear_ftrace_pid_task(pid);
4690
4691 list_del(&fpid->list);
4692 kfree(fpid);
4693 }
4694
4695 ftrace_update_pid_func();
4696 ftrace_startup_enable(0);
4697
4698 mutex_unlock(&ftrace_lock);
4699 }
4700
4701 static void *fpid_start(struct seq_file *m, loff_t *pos)
4702 {
4703 mutex_lock(&ftrace_lock);
4704
4705 if (list_empty(&ftrace_pids) && (!*pos))
4706 return (void *) 1;
4707
4708 return seq_list_start(&ftrace_pids, *pos);
4709 }
4710
4711 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4712 {
4713 if (v == (void *)1)
4714 return NULL;
4715
4716 return seq_list_next(v, &ftrace_pids, pos);
4717 }
4718
4719 static void fpid_stop(struct seq_file *m, void *p)
4720 {
4721 mutex_unlock(&ftrace_lock);
4722 }
4723
4724 static int fpid_show(struct seq_file *m, void *v)
4725 {
4726 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4727
4728 if (v == (void *)1) {
4729 seq_printf(m, "no pid\n");
4730 return 0;
4731 }
4732
4733 if (fpid->pid == ftrace_swapper_pid)
4734 seq_printf(m, "swapper tasks\n");
4735 else
4736 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4737
4738 return 0;
4739 }
4740
4741 static const struct seq_operations ftrace_pid_sops = {
4742 .start = fpid_start,
4743 .next = fpid_next,
4744 .stop = fpid_stop,
4745 .show = fpid_show,
4746 };
4747
4748 static int
4749 ftrace_pid_open(struct inode *inode, struct file *file)
4750 {
4751 int ret = 0;
4752
4753 if ((file->f_mode & FMODE_WRITE) &&
4754 (file->f_flags & O_TRUNC))
4755 ftrace_pid_reset();
4756
4757 if (file->f_mode & FMODE_READ)
4758 ret = seq_open(file, &ftrace_pid_sops);
4759
4760 return ret;
4761 }
4762
4763 static ssize_t
4764 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4765 size_t cnt, loff_t *ppos)
4766 {
4767 char buf[64], *tmp;
4768 long val;
4769 int ret;
4770
4771 if (cnt >= sizeof(buf))
4772 return -EINVAL;
4773
4774 if (copy_from_user(&buf, ubuf, cnt))
4775 return -EFAULT;
4776
4777 buf[cnt] = 0;
4778
4779 /*
4780 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4781 	 * to clear the pid filter quietly.
4782 */
4783 tmp = strstrip(buf);
4784 if (strlen(tmp) == 0)
4785 return 1;
4786
4787 ret = kstrtol(tmp, 10, &val);
4788 if (ret < 0)
4789 return ret;
4790
4791 ret = ftrace_pid_add(val);
4792
4793 return ret ? ret : cnt;
4794 }
4795
4796 static int
4797 ftrace_pid_release(struct inode *inode, struct file *file)
4798 {
4799 if (file->f_mode & FMODE_READ)
4800 seq_release(inode, file);
4801
4802 return 0;
4803 }
4804
4805 static const struct file_operations ftrace_pid_fops = {
4806 .open = ftrace_pid_open,
4807 .write = ftrace_pid_write,
4808 .read = seq_read,
4809 .llseek = tracing_lseek,
4810 .release = ftrace_pid_release,
4811 };
4812
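/*
 * The "set_ftrace_pid" file created below is the user interface to the
 * pid filter above.  A sketch of the expected usage (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	echo 123 > /sys/kernel/debug/tracing/set_ftrace_pid   (trace pid 123)
 *	echo 0   > /sys/kernel/debug/tracing/set_ftrace_pid   (trace the idle tasks)
 *	echo     > /sys/kernel/debug/tracing/set_ftrace_pid   (clear the filter)
 */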
4813 static __init int ftrace_init_debugfs(void)
4814 {
4815 struct dentry *d_tracer;
4816
4817 d_tracer = tracing_init_dentry();
4818 if (!d_tracer)
4819 return 0;
4820
4821 ftrace_init_dyn_debugfs(d_tracer);
4822
4823 trace_create_file("set_ftrace_pid", 0644, d_tracer,
4824 NULL, &ftrace_pid_fops);
4825
4826 ftrace_profile_debugfs(d_tracer);
4827
4828 return 0;
4829 }
4830 fs_initcall(ftrace_init_debugfs);
4831
4832 /**
4833 * ftrace_kill - kill ftrace
4834 *
4835  * This function should be used by panic code. It stops ftrace
4836  * but in a not so nice way. It takes no locks, so it is safe
4837  * to call from atomic context.
4838 */
4839 void ftrace_kill(void)
4840 {
4841 ftrace_disabled = 1;
4842 ftrace_enabled = 0;
4843 clear_ftrace_function();
4844 }
4845
4846 /**
4847  * ftrace_is_dead - Test if ftrace is dead or not.
4848 */
4849 int ftrace_is_dead(void)
4850 {
4851 return ftrace_disabled;
4852 }
4853
4854 /**
4855 * register_ftrace_function - register a function for profiling
4856 * @ops - ops structure that holds the function for profiling.
4857 *
4858  * Register a callback to be called at the entry of every function
4859  * in the kernel (subject to any filtering set up on @ops).
4860 *
4861 * Note: @ops->func and all the functions it calls must be labeled
4862 * with "notrace", otherwise it will go into a
4863 * recursive loop.
4864 */
4865 int register_ftrace_function(struct ftrace_ops *ops)
4866 {
4867 int ret = -1;
4868
4869 ftrace_ops_init(ops);
4870
4871 mutex_lock(&ftrace_lock);
4872
4873 ret = ftrace_startup(ops, 0);
4874
4875 mutex_unlock(&ftrace_lock);
4876
4877 return ret;
4878 }
4879 EXPORT_SYMBOL_GPL(register_ftrace_function);
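
/*
 * Minimal usage sketch (illustration only, not compiled here).  The names
 * my_callback, my_ops and do_something_notraceable are hypothetical; a
 * real caller would typically live in a separate tracer or module:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		do_something_notraceable(ip, parent_ip);
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */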
4880
4881 /**
4882 * unregister_ftrace_function - unregister a function for profiling.
4883 * @ops - ops structure that holds the function to unregister
4884 *
4885 * Unregister a function that was added to be called by ftrace profiling.
4886 */
4887 int unregister_ftrace_function(struct ftrace_ops *ops)
4888 {
4889 int ret;
4890
4891 mutex_lock(&ftrace_lock);
4892 ret = ftrace_shutdown(ops, 0);
4893 mutex_unlock(&ftrace_lock);
4894
4895 return ret;
4896 }
4897 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4898
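/*
 * proc handler behind the "ftrace_enabled" sysctl.  Writing 0 points
 * ftrace_trace_function at ftrace_stub so callbacks stop running;
 * writing 1 re-installs the registered ops via update_ftrace_function().
 */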
4899 int
4900 ftrace_enable_sysctl(struct ctl_table *table, int write,
4901 void __user *buffer, size_t *lenp,
4902 loff_t *ppos)
4903 {
4904 int ret = -ENODEV;
4905
4906 mutex_lock(&ftrace_lock);
4907
4908 if (unlikely(ftrace_disabled))
4909 goto out;
4910
4911 ret = proc_dointvec(table, write, buffer, lenp, ppos);
4912
4913 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4914 goto out;
4915
4916 last_ftrace_enabled = !!ftrace_enabled;
4917
4918 if (ftrace_enabled) {
4919
4920 ftrace_startup_sysctl();
4921
4922 /* we are starting ftrace again */
4923 if (ftrace_ops_list != &ftrace_list_end)
4924 update_ftrace_function();
4925
4926 } else {
4927 /* stopping ftrace calls (just send to ftrace_stub) */
4928 ftrace_trace_function = ftrace_stub;
4929
4930 ftrace_shutdown_sysctl();
4931 }
4932
4933 out:
4934 mutex_unlock(&ftrace_lock);
4935 return ret;
4936 }
4937
4938 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4939
4940 static int ftrace_graph_active;
4941 static struct notifier_block ftrace_suspend_notifier;
4942
4943 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4944 {
4945 return 0;
4946 }
4947
4948 /* The callbacks that hook a function */
4949 trace_func_graph_ret_t ftrace_graph_return =
4950 (trace_func_graph_ret_t)ftrace_stub;
4951 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4952 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
4953
4954 /* Try to assign a return stack to FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
4955 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4956 {
4957 int i;
4958 int ret = 0;
4959 unsigned long flags;
4960 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4961 struct task_struct *g, *t;
4962
4963 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4964 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4965 * sizeof(struct ftrace_ret_stack),
4966 GFP_KERNEL);
4967 if (!ret_stack_list[i]) {
4968 start = 0;
4969 end = i;
4970 ret = -ENOMEM;
4971 goto free;
4972 }
4973 }
4974
4975 read_lock_irqsave(&tasklist_lock, flags);
4976 do_each_thread(g, t) {
4977 if (start == end) {
4978 ret = -EAGAIN;
4979 goto unlock;
4980 }
4981
4982 if (t->ret_stack == NULL) {
4983 atomic_set(&t->tracing_graph_pause, 0);
4984 atomic_set(&t->trace_overrun, 0);
4985 t->curr_ret_stack = -1;
4986 /* Make sure the tasks see the -1 first: */
4987 smp_wmb();
4988 t->ret_stack = ret_stack_list[start++];
4989 }
4990 } while_each_thread(g, t);
4991
4992 unlock:
4993 read_unlock_irqrestore(&tasklist_lock, flags);
4994 free:
4995 for (i = start; i < end; i++)
4996 kfree(ret_stack_list[i]);
4997 return ret;
4998 }
4999
5000 static void
5001 ftrace_graph_probe_sched_switch(void *ignore,
5002 struct task_struct *prev, struct task_struct *next)
5003 {
5004 unsigned long long timestamp;
5005 int index;
5006
5007 /*
5008 	 * Does the user want to count the time a function was asleep?
5009 * If so, do not update the time stamps.
5010 */
5011 if (trace_flags & TRACE_ITER_SLEEP_TIME)
5012 return;
5013
5014 timestamp = trace_clock_local();
5015
5016 prev->ftrace_timestamp = timestamp;
5017
5018 /* only process tasks that we timestamped */
5019 if (!next->ftrace_timestamp)
5020 return;
5021
5022 /*
5023 * Update all the counters in next to make up for the
5024 * time next was sleeping.
5025 */
5026 timestamp -= next->ftrace_timestamp;
5027
5028 for (index = next->curr_ret_stack; index >= 0; index--)
5029 next->ret_stack[index].calltime += timestamp;
5030 }
5031
5032 /* Allocate a return stack for each task */
5033 static int start_graph_tracing(void)
5034 {
5035 struct ftrace_ret_stack **ret_stack_list;
5036 int ret, cpu;
5037
5038 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5039 sizeof(struct ftrace_ret_stack *),
5040 GFP_KERNEL);
5041
5042 if (!ret_stack_list)
5043 return -ENOMEM;
5044
5045 /* The cpu_boot init_task->ret_stack will never be freed */
5046 for_each_online_cpu(cpu) {
5047 if (!idle_task(cpu)->ret_stack)
5048 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5049 }
5050
5051 do {
5052 ret = alloc_retstack_tasklist(ret_stack_list);
5053 } while (ret == -EAGAIN);
5054
5055 if (!ret) {
5056 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5057 if (ret)
5058 pr_info("ftrace_graph: Couldn't activate tracepoint"
5059 " probe to kernel_sched_switch\n");
5060 }
5061
5062 kfree(ret_stack_list);
5063 return ret;
5064 }
5065
5066 /*
5067 * Hibernation protection.
5068  * The state of the current task is too unstable during
5069 * suspend/restore to disk. We want to protect against that.
5070 */
5071 static int
5072 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5073 void *unused)
5074 {
5075 switch (state) {
5076 case PM_HIBERNATION_PREPARE:
5077 pause_graph_tracing();
5078 break;
5079
5080 case PM_POST_HIBERNATION:
5081 unpause_graph_tracing();
5082 break;
5083 }
5084 return NOTIFY_DONE;
5085 }
5086
5087 /* Just a place holder for function graph */
5088 static struct ftrace_ops fgraph_ops __read_mostly = {
5089 .func = ftrace_stub,
5090 .flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
5091 FTRACE_OPS_FL_RECURSION_SAFE,
5092 };
5093
5094 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5095 {
5096 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5097 return 0;
5098 return __ftrace_graph_entry(trace);
5099 }
5100
5101 /*
5102 * The function graph tracer should only trace the functions defined
5103 * by set_ftrace_filter and set_ftrace_notrace. If another function
5104  * tracer ops is registered, the graph tracer must test each
5105  * function against the global ops, rather than tracing any
5106  * function that any registered ftrace_ops happens to match.
5107 */
5108 static void update_function_graph_func(void)
5109 {
5110 if (ftrace_ops_list == &ftrace_list_end ||
5111 (ftrace_ops_list == &global_ops &&
5112 global_ops.next == &ftrace_list_end))
5113 ftrace_graph_entry = __ftrace_graph_entry;
5114 else
5115 ftrace_graph_entry = ftrace_graph_entry_test;
5116 }
5117
5118 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5119 trace_func_graph_ent_t entryfunc)
5120 {
5121 int ret = 0;
5122
5123 mutex_lock(&ftrace_lock);
5124
5125 /* we currently allow only one tracer registered at a time */
5126 if (ftrace_graph_active) {
5127 ret = -EBUSY;
5128 goto out;
5129 }
5130
5131 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
5132 register_pm_notifier(&ftrace_suspend_notifier);
5133
5134 ftrace_graph_active++;
5135 ret = start_graph_tracing();
5136 if (ret) {
5137 ftrace_graph_active--;
5138 goto out;
5139 }
5140
5141 ftrace_graph_return = retfunc;
5142
5143 /*
5144 * Update the indirect function to the entryfunc, and the
5145 * function that gets called to the entry_test first. Then
5146 * call the update fgraph entry function to determine if
5147 * the entryfunc should be called directly or not.
5148 */
5149 __ftrace_graph_entry = entryfunc;
5150 ftrace_graph_entry = ftrace_graph_entry_test;
5151 update_function_graph_func();
5152
5153 ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
5154
5155 out:
5156 mutex_unlock(&ftrace_lock);
5157 return ret;
5158 }
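
/*
 * Usage sketch (illustration only, not compiled here).  The handler names
 * my_graph_entry and my_graph_return are hypothetical:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	(return 0 to not trace this call)
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		(called when the traced function returns)
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 *
 * Note the return handler comes first in the argument list.
 */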
5159
5160 void unregister_ftrace_graph(void)
5161 {
5162 mutex_lock(&ftrace_lock);
5163
5164 if (unlikely(!ftrace_graph_active))
5165 goto out;
5166
5167 ftrace_graph_active--;
5168 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5169 ftrace_graph_entry = ftrace_graph_entry_stub;
5170 __ftrace_graph_entry = ftrace_graph_entry_stub;
5171 ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
5172 unregister_pm_notifier(&ftrace_suspend_notifier);
5173 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5174
5175 out:
5176 mutex_unlock(&ftrace_lock);
5177 }
5178
5179 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5180
5181 static void
5182 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5183 {
5184 atomic_set(&t->tracing_graph_pause, 0);
5185 atomic_set(&t->trace_overrun, 0);
5186 t->ftrace_timestamp = 0;
5187 /* make curr_ret_stack visible before we add the ret_stack */
5188 smp_wmb();
5189 t->ret_stack = ret_stack;
5190 }
5191
5192 /*
5193 * Allocate a return stack for the idle task. May be the first
5194 * time through, or it may be done by CPU hotplug online.
5195 */
5196 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5197 {
5198 t->curr_ret_stack = -1;
5199 /*
5200 * The idle task has no parent, it either has its own
5201 * stack or no stack at all.
5202 */
5203 if (t->ret_stack)
5204 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5205
5206 if (ftrace_graph_active) {
5207 struct ftrace_ret_stack *ret_stack;
5208
5209 ret_stack = per_cpu(idle_ret_stack, cpu);
5210 if (!ret_stack) {
5211 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5212 * sizeof(struct ftrace_ret_stack),
5213 GFP_KERNEL);
5214 if (!ret_stack)
5215 return;
5216 per_cpu(idle_ret_stack, cpu) = ret_stack;
5217 }
5218 graph_init_task(t, ret_stack);
5219 }
5220 }
5221
5222 /* Allocate a return stack for newly created task */
5223 void ftrace_graph_init_task(struct task_struct *t)
5224 {
5225 /* Make sure we do not use the parent ret_stack */
5226 t->ret_stack = NULL;
5227 t->curr_ret_stack = -1;
5228
5229 if (ftrace_graph_active) {
5230 struct ftrace_ret_stack *ret_stack;
5231
5232 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5233 * sizeof(struct ftrace_ret_stack),
5234 GFP_KERNEL);
5235 if (!ret_stack)
5236 return;
5237 graph_init_task(t, ret_stack);
5238 }
5239 }
5240
5241 void ftrace_graph_exit_task(struct task_struct *t)
5242 {
5243 struct ftrace_ret_stack *ret_stack = t->ret_stack;
5244
5245 t->ret_stack = NULL;
5246 /* NULL must become visible to IRQs before we free it: */
5247 barrier();
5248
5249 kfree(ret_stack);
5250 }
5251
5252 void ftrace_graph_stop(void)
5253 {
5254 ftrace_stop();
5255 }
5256 #endif