function-graph/x86: Replace unbalanced ret with jmp
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <trace/events/sched.h>

#include <asm/ftrace.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
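
/*
 * Note: ftrace_kill() (defined further down in this file) sets
 * ftrace_disabled, so a single tripped FTRACE_WARN_ON() permanently
 * shuts the function tracer down for the rest of the boot rather
 * than risking any further code patching.
 */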

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
#endif

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
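
/*
 * The list walk above is lockless: it may run concurrently with
 * __register_ftrace_function() below. It is safe because a new ops
 * is published only after its ->next pointer is valid (see the
 * smp_wmb() there), which pairs with the read_barrier_depends()
 * calls here.
 */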

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before all callers stop
 * entering the trace functions.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	func = ftrace_trace_function;
#else
	func = __ftrace_trace_function;
#endif

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE						\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
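
/*
 * Illustrative sizing (the exact numbers are arch- and
 * config-dependent): with 4K pages, a struct ftrace_profile of
 * roughly 32 bytes (larger when CONFIG_FUNCTION_GRAPH_TRACER adds
 * the time field) gives on the order of a hundred records per page
 * once the page header is subtracted.
 */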

static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph, compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg\n"
		   "  --------                               "
		   "---    ----            ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		   "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static DEFINE_MUTEX(mutex);
	static struct trace_seq s;
	unsigned long long avg;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	avg = rec->time;
	do_div(avg, rec->counter);

	mutex_lock(&mutex);
	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_print_seq(m, &s);
	mutex_unlock(&mutex);
#endif
	seq_putc(m, '\n');

	return 0;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. From past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 0; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	free_page((unsigned long)stat->pages);
	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}
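
/*
 * The !ftrace_profile_bits block above just computes ilog2 of the
 * hash size: with the default FTRACE_PROFILE_HASH_SIZE of 1024,
 * size starts at 1023 and is shifted right ten times, leaving
 * ftrace_profile_bits == 10 for the hash_long() calls below.
 */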

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_online_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}
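
	/*
	 * At this point calltime is the function's "own" time: its
	 * wall-clock duration minus the accumulated subtime of its
	 * children. E.g. if foo() ran for 100us total and its callees
	 * recorded 60us of subtime, foo() is charged 40us here.
	 */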

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec)
		rec->time += calltime;

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];		/* big enough to hold a number */
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent; even if
			 * something goes wrong we do not free this memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};
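
/*
 * These bits are OR'd into a single command word and handed to
 * ftrace_run_update_code(); e.g. ftrace_startup() below passes
 * FTRACE_ENABLE_CALLS (plus FTRACE_UPDATE_TRACE_FUNC when the trace
 * callback changed) so that one stop_machine pass does both jobs.
 */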

static int ftrace_filtered;

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
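
/*
 * Typical usage (see ftrace_replace_code() below; the label is
 * illustrative): a 'continue' skips to the next record, but since
 * this expands to two nested for loops, leaving early must use a
 * goto:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;
 *		if (failed)
 *			goto out_unlock;
 *	} while_for_each_ftrace_rec();
 */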

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}


static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long flag = 0UL;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	/*
	 * If this record is not to be traced or we want to disable it,
	 * then disable it.
	 *
	 * If we want to enable it and filtering is off, then enable it.
	 *
	 * If we want to enable it and filtering is on, enable it only if
	 * it's filtered
	 */
	if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
		if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
			flag = FTRACE_FL_ENABLED;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return 0;

	if (flag) {
		rec->flags |= FTRACE_FL_ENABLED;
		return ftrace_make_call(rec, ftrace_addr);
	}

	rec->flags &= ~FTRACE_FL_ENABLED;
	return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records and records that have
		 * failed and not been converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may never be nopped again
	 * after further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		p->flags = 0L;

		/* convert record (i.e., patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
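
/*
 * Illustrative sizing (actual numbers depend on the arch and on
 * sizeof(struct dyn_ftrace)): with 4K pages and records of a few
 * tens of bytes, ENTRIES_PER_PAGE lands in the low hundreds, so an
 * num_to_init around NR_TO_INIT (10000) costs only a few dozen
 * pages up front.
 */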

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_FAILURES	= (1 << 2),
	FTRACE_ITER_PRINTALL	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			hidx;
	int			idx;
	unsigned		flags;
	struct trace_parser	parser;
};

static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_HASH))
		*pos = 0;

	iter->flags |= FTRACE_ITER_HASH;

	iter->hidx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_hash_next(m, p, &l);
		if (!p)
			break;
	}
	return p;
}

static int t_hash_show(struct seq_file *m, void *v)
{
	struct ftrace_func_probe *rec;
	struct hlist_node *hnd = v;

	rec = hlist_entry(hnd, struct ftrace_func_probe, node);

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p && iter->flags & FTRACE_ITER_FILTER)
		return t_hash_start(m, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	seq_printf(m, "%ps\n", (void *)rec->ip);

	return 0;
}

static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	char *ptr;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		ptr = strstr(str, regex);
		if (ptr && (ptr[len] == 0))
			matched = 1;
		break;
	}

	return matched;
}
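
/*
 * The MATCH_* type comes from filter_parse_regex(): e.g. a filter of
 * "sched*" yields MATCH_FRONT_ONLY with regex "sched", "*_lock"
 * yields MATCH_END_ONLY, "*sched*" yields MATCH_MIDDLE_ONLY, and a
 * bare "schedule" is MATCH_FULL.
 */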

static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}

static void ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = filter_parse_regex(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = filter_parse_regex(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	ftrace_match_module_records(func, mod, enable);
	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
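
/*
 * From user space this command is reached via set_ftrace_filter,
 * e.g. (module name purely illustrative):
 *
 *	echo 'write*:mod:ext3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * which limits tracing to functions matching 'write*' in ext3.
 */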

static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent an RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
};

static int ftrace_probe_registered;

static void __enable_ftrace_function_probe(void)
{
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	__register_ftrace_function(&trace_probe_ops);
	ftrace_startup(0);
	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	__unregister_ftrace_function(&trace_probe_ops);
	ftrace_shutdown(0);
	ftrace_probe_registered = 0;
}


static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}


int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
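
/*
 * Probes registered here back the conditional commands of
 * set_ftrace_filter (e.g. the traceon/traceoff triggers implemented
 * elsewhere in the tracers): each matching function gets an entry in
 * ftrace_func_hash, and function_trace_probe_call() above fires
 * ops->func on every hit.
 */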
1999
2000enum {
b6887d79
SR
2001 PROBE_TEST_FUNC = 1,
2002 PROBE_TEST_DATA = 2
59df055f
SR
2003};
static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				   void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					   PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}

static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

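/*
 * Illustrative sketch (not part of this file): registering a custom
 * set_ftrace_filter command.  The handler signature matches the
 * p->func(func, command, next, enable) call in ftrace_process_regex()
 * below; "mycmd" is a made-up name.
 *
 *	static int mycmd_func(char *func, char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command mycmd = {
 *		.name	= "mycmd",
 *		.func	= mycmd_func,
 *	};
 *
 *	register_ftrace_command(&mycmd);
 */
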
static int ftrace_process_regex(char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ftrace_match_records(func, len, enable);
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

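/*
 * The accepted input is therefore "function[:command[:parameter]]".
 * For example, with the traceon/traceoff commands that the function
 * tracer registers elsewhere (assuming that tracer is built in):
 *
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *
 * disables tracing whenever schedule() is hit.
 */
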
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(parser->buffer,
					   parser->idx, enable);
		if (ret)
			goto out;

		trace_parser_clear(parser);
	}

	ret = read;
out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

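/*
 * A minimal in-kernel usage sketch (hypothetical caller): a tracer that
 * only wants to see scheduler entry points could do
 *
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 *
 * which resets any existing filter and then enables every function
 * matching the glob.
 */
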
/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;

static int __init set_ftrace_notrace(char *str)
{
	strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int __init set_graph_function(char *str)
{
	strncpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);

static void __init set_ftrace_early_graph(char *buf)
{
	int ret;
	char *func;

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
				      func);
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not "
					  "traceable\n", func);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static void __init set_ftrace_early_filter(char *buf, int enable)
{
	char *func;

	while (buf) {
		func = strsep(&buf, ",");
		ftrace_set_regex(func, strlen(func), 0, enable);
	}
}

static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		set_ftrace_early_filter(ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		set_ftrace_early_filter(ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}

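/*
 * Boot-time example, matching the __setup() hooks above: appending
 *
 *	ftrace_filter=sys_read,sys_write ftrace_notrace=schedule
 *
 * to the kernel command line seeds the filter and notrace lists before
 * userspace exists (and ftrace_graph_filter= does the same for the
 * graph tracer when it is configured in).
 */
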
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct trace_parser *parser;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(parser->buffer, parser->idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	trace_parser_put(parser);
	kfree(iter);

	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
__g_next(struct seq_file *m, loff_t *pos)
{
	if (*pos >= ftrace_graph_count)
		return NULL;
	return &ftrace_graph_funcs[*pos];
}

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&graph_lock);

	/* Nothing to iterate; tell g_show to print "all functions enabled" */
	if (!ftrace_graph_count && !*pos)
		return (void *)1;

	return __g_next(m, pos);
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	seq_printf(m, "%ps\n", (void *)*ptr);

	return 0;
}

static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_graph_seq_ops);

	return ret;
}

static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	return 0;
}

static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int found = 0;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	if (not)
		return -EINVAL;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
			break;

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* ensure it is not already in the array */
			exists = false;
			for (i = 0; i < *idx; i++)
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			if (!exists) {
				array[(*idx)++] = rec->ip;
				found = 1;
			}
		}
	} while_for_each_ftrace_rec();

	mutex_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out_unlock;
	}

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		parser.buffer[parser.idx] = 0;

		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
				      parser.buffer);
		if (ret)
			goto out_free;
	}

	ret = read;

out_free:
	trace_parser_put(&parser);
out_unlock:
	mutex_unlock(&graph_lock);

	return ret;
}

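/*
 * From userspace this is driven through the set_graph_function file
 * created below in ftrace_init_dyn_debugfs(), one expression per write:
 *
 *	echo sys_read > /sys/kernel/debug/tracing/set_graph_function
 *
 * (the debugfs mount point above is the usual convention, not something
 * fixed by this file).
 */
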
static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
	.release = ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	trace_create_file("available_filter_functions", 0444,
			d_tracer, NULL, &ftrace_avail_fops);

	trace_create_file("failures", 0444,
			d_tracer, NULL, &ftrace_failures_fops);

	trace_create_file("set_ftrace_filter", 0644, d_tracer,
			NULL, &ftrace_filter_fops);

	trace_create_file("set_ftrace_notrace", 0644, d_tracer,
			NULL, &ftrace_notrace_fops);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	trace_create_file("set_graph_function", 0644, d_tracer,
			NULL,
			&ftrace_graph_fops);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}

#ifdef CONFIG_MODULES
void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (ftrace_disabled)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if (within_module_core(rec->ip, mod)) {
			/*
			 * rec->ip is changed in ftrace_free_rec(), so it
			 * should no longer fall inside the module's range
			 * if the record was already freed.
			 */
			FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
			ftrace_free_rec(rec);
		}
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static void ftrace_init_module(struct module *mod,
			       unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		ftrace_init_module(mod, mod->ftrace_callsites,
				   mod->ftrace_callsites +
				   mod->num_ftrace_callsites);
		break;
	case MODULE_STATE_GOING:
		ftrace_release_mod(mod);
		break;
	}

	return 0;
}
#else
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block ftrace_module_nb = {
	.notifier_call = ftrace_module_notify,
	.priority = 0,
};

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_nb);
	if (ret)
		pr_warning("Failed to register ftrace module notifier\n");

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}

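/*
 * From userspace (paths assume the usual debugfs mount):
 *
 *	echo 123 > /sys/kernel/debug/tracing/set_ftrace_pid   # trace pid 123
 *	echo 0   > /sys/kernel/debug/tracing/set_ftrace_pid   # trace swapper
 *	echo -1  > /sys/kernel/debug/tracing/set_ftrace_pid   # no pid filter
 */
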
static const struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    NULL, &ftrace_pid_fops);

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you simply need to stop tracing
 * from a non-atomic section, unregister the ftrace_ops instead.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}

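/*
 * Illustrative sketch (hypothetical caller): hooking every traced
 * function with a callback.  The callback must be notrace, per the note
 * above; my_tracer_func and my_ops are made-up names.
 *
 *	static void notrace my_tracer_func(unsigned long ip,
 *					   unsigned long parent_ip)
 *	{
 *		inspect ip/parent_ip here
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_tracer_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */
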
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

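/*
 * This handler backs the ftrace_enabled sysctl (assuming the usual
 * registration in kernel/sysctl.c), so from userspace:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled    # stop function calls
 *	echo 1 > /proc/sys/kernel/ftrace_enabled    # start them again
 */
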
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static int ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

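/*
 * Note the two-phase shape above: the stacks are kmalloc'd up front
 * because tasklist_lock is held with interrupts off in the second
 * phase, where sleeping allocations are not allowed.  Running out of
 * preallocated stacks returns -EAGAIN so the caller can retry with a
 * fresh batch.
 */
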
static void
ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
				struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * If the user wants the time a function spent asleep counted in
	 * its duration, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_task(idle_task(cpu));
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

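/*
 * Illustrative sketch (hypothetical caller): registering entry/return
 * hooks.  The signatures follow ftrace_graph_entry_stub() above and the
 * trace_func_graph_ret_t callback type; assuming the convention used by
 * the arch hooks, returning 0 from the entry callback skips recording
 * that call.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 */
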
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

 out:
	mutex_unlock(&ftrace_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				    * sizeof(struct ftrace_ret_stack),
				    GFP_KERNEL);
		if (!ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
		t->ftrace_timestamp = 0;
		/* make curr_ret_stack visible before we add the ret_stack */
		smp_wmb();
		t->ret_stack = ret_stack;
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif