tracing: move function profiler data out of function struct
kernel/trace/ftrace.c

/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <trace/sched.h>

#include <asm/ftrace.h>

#include "trace.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

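/*
 * Back-of-the-envelope sketch (illustrative, arch-dependent): with 4K
 * pages and 64-bit pointers, struct ftrace_profile is 32 bytes (a
 * two-pointer hlist_node plus ip and counter) and the page header is
 * 16 bytes, so PROFILES_PER_PAGE works out to (4096 - 16) / 32 = 127
 * records per profile page.
 */
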
/* TODO: make these percpu, to prevent cache line bouncing */
static struct ftrace_profile_page *profile_pages_start;
static struct ftrace_profile_page *profile_pages;

static struct hlist_head *ftrace_profile_hash;
static int ftrace_profile_bits;
static int ftrace_profile_enabled;
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(atomic_t, ftrace_profile_disable);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be a power of 2 */

static raw_spinlock_t ftrace_profile_rec_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	/* profile pages come from get_zeroed_page(), so they are page aligned */
	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	rec++;
	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	return function_stat_next(&profile_pages_start->records[0], 0);
}

static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}

static int function_stat_headers(struct seq_file *m)
{
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "  %-30.30s  %10lu\n", str, rec->counter);
	return 0;
}

static struct tracer_stat function_stats = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static void ftrace_profile_reset(void)
{
	struct ftrace_profile_page *pg;

	pg = profile_pages = profile_pages_start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(ftrace_profile_hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(void)
{
	struct ftrace_profile_page *pg;
	int i;

	/* If we already allocated, do nothing */
	if (profile_pages)
		return 0;

	profile_pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!profile_pages)
		return -ENOMEM;

	pg = profile_pages_start = profile_pages;

	/* allocate 10 more pages to start */
	for (i = 0; i < 10; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		/*
		 * We only care about allocating profile_pages; if
		 * we failed to allocate here, hopefully we will allocate
		 * later.
		 */
		if (!pg->next)
			break;
		pg = pg->next;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int size;

	if (ftrace_profile_hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset();
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	ftrace_profile_hash =
		kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!ftrace_profile_hash)
		return -ENOMEM;

	/* the hash size is a power of 2; compute its log2 for hash_long() */
	size--;

	for (; size; size >>= 1)
		ftrace_profile_bits++;

	/* Preallocate a few pages */
	if (ftrace_profile_pages_init() < 0) {
		kfree(ftrace_profile_hash);
		ftrace_profile_hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* interrupts must be disabled */
static struct ftrace_profile *ftrace_find_profiled_func(unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &ftrace_profile_hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &ftrace_profile_hash[key]);
}

/* Interrupts must be disabled when calling this */
static struct ftrace_profile *
ftrace_profile_alloc(unsigned long ip, bool alloc_safe)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion */
	if (atomic_inc_return(&__get_cpu_var(ftrace_profile_disable)) != 1)
		goto out;

	__raw_spin_lock(&ftrace_profile_rec_lock);

	/* Try to always keep another page available */
	if (!profile_pages->next && alloc_safe)
		profile_pages->next = (void *)get_zeroed_page(GFP_ATOMIC);

	/*
	 * Try to find the function again since another
	 * task on another CPU could have added it
	 */
	rec = ftrace_find_profiled_func(ip);
	if (rec)
		goto out_unlock;

	if (profile_pages->index == PROFILES_PER_PAGE) {
		if (!profile_pages->next)
			goto out_unlock;
		profile_pages = profile_pages->next;
	}

	rec = &profile_pages->records[profile_pages->index++];
	rec->ip = ip;
	ftrace_add_profile(rec);

 out_unlock:
	__raw_spin_unlock(&ftrace_profile_rec_lock);
 out:
	atomic_dec(&__get_cpu_var(ftrace_profile_disable));

	return rec;
}

/*
 * If we are not in an interrupt or softirq, and interrupts are
 * disabled and preemption is not enabled (not in a spinlock),
 * then it should be safe to allocate memory.
 */
static bool ftrace_safe_to_allocate(void)
{
	return !in_interrupt() && irqs_disabled() && !preempt_count();
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile *rec;
	unsigned long flags;
	bool alloc_safe;

	if (!ftrace_profile_enabled)
		return;

	alloc_safe = ftrace_safe_to_allocate();

	local_irq_save(flags);
	rec = ftrace_find_profiled_func(ip);
	if (!rec) {
		rec = ftrace_profile_alloc(ip, alloc_safe);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func = function_profile_call,
};

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			register_ftrace_function(&ftrace_profile_ops);
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			unregister_ftrace_function(&ftrace_profile_ops);
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
};

static void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;
	int ret;

	ret = register_stat_tracer(&function_stats);
	if (ret) {
		pr_warning("Warning: could not register "
			   "function stats\n");
		return;
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

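/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	# cat /sys/kernel/debug/tracing/trace_stat/functions
 *
 * The second path comes from the "functions" tracer_stat registered
 * above; the exact location depends on the debugfs mount point.
 */
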
#else /* CONFIG_FUNCTION_PROFILER */
static void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT 10000

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop;
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

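/*
 * Illustrative sketch (hypothetical snippet, not part of this file):
 * the macro pair above expands to two nested for loops, so early exit
 * must use a goto; a 'break' would only leave the inner loop:
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	rec = NULL;
 * found:
 *	...
 */
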
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;

	if (ftrace_disabled || !start)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if ((rec->ip >= s) && (rec->ip < e) &&
		    !(rec->flags & FTRACE_FL_FREE))
			ftrace_free_rec(rec);
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long ip, fl;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have failed,
		 * and records that have not been converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else {
				ftrace_bug(failed, rec->ip);
				/* Stop processing */
				return;
			}
		}
	} while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}

static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		p->flags = 0L;

		/* convert record (i.e., patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
	FTRACE_ITER_HASH	= (1 << 5),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			hidx;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	iter->flags |= FTRACE_ITER_HASH;

	return t_hash_next(m, p, pos);
}

static int t_hash_show(struct seq_file *m, void *v)
{
	struct ftrace_func_probe *rec;
	struct hlist_node *hnd = v;
	char str[KSYM_SYMBOL_LEN];

	rec = hlist_entry(hnd, struct ftrace_func_probe, node);

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
	seq_printf(m, "%s", str);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	if (!p)
		return t_hash_start(m, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}

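/*
 * Worked examples of the parse above (derived from the code, shown
 * here for illustration only):
 *
 *	"foo"	-> MATCH_FULL,        search = "foo", not = 0
 *	"foo*"	-> MATCH_FRONT_ONLY,  search = "foo"
 *	"*foo"	-> MATCH_END_ONLY,    search = "foo"
 *	"*foo*"	-> MATCH_MIDDLE_ONLY, search = "foo"
 *	"!foo"	-> MATCH_FULL,        search = "foo", not = 1
 */
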
static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	char *ptr;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		ptr = strstr(str, regex);
		if (ptr && (ptr[len] == 0))
			matched = 1;
		break;
	}

	return matched;
}

static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}

9f4801e3
SR
1617static void ftrace_match_records(char *buff, int len, int enable)
1618{
6a24a244 1619 unsigned int search_len;
9f4801e3
SR
1620 struct ftrace_page *pg;
1621 struct dyn_ftrace *rec;
6a24a244
SR
1622 unsigned long flag;
1623 char *search;
9f4801e3 1624 int type;
9f4801e3
SR
1625 int not;
1626
6a24a244 1627 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
9f4801e3
SR
1628 type = ftrace_setup_glob(buff, len, &search, &not);
1629
1630 search_len = strlen(search);
1631
52baf119 1632 mutex_lock(&ftrace_lock);
265c831c 1633 do_for_each_ftrace_rec(pg, rec) {
265c831c
SR
1634
1635 if (rec->flags & FTRACE_FL_FAILED)
1636 continue;
9f4801e3
SR
1637
1638 if (ftrace_match_record(rec, search, search_len, type)) {
265c831c
SR
1639 if (not)
1640 rec->flags &= ~flag;
1641 else
1642 rec->flags |= flag;
1643 }
e68746a2
SR
1644 /*
1645 * Only enable filtering if we have a function that
1646 * is filtered on.
1647 */
1648 if (enable && (rec->flags & FTRACE_FL_FILTER))
1649 ftrace_filtered = 1;
265c831c 1650 } while_for_each_ftrace_rec();
52baf119 1651 mutex_unlock(&ftrace_lock);
5072c59f
SR
1652}
1653
static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	ftrace_match_module_records(func, mod, enable);
	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);

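/*
 * Following the template above, a minimal sketch of a hypothetical
 * command (the name "dump" and its callback are made up for
 * illustration, not part of this file):
 *
 *	static int ftrace_dump_callback(char *func, char *cmd,
 *					char *param, int enable)
 *	{
 *		return 0;	(act on functions matching 'func' here)
 *	}
 *
 *	static struct ftrace_func_command ftrace_dump_cmd = {
 *		.name = "dump",
 *		.func = ftrace_dump_callback,
 *	};
 *
 * After register_ftrace_command(&ftrace_dump_cmd), writing
 * "schedule:dump" to set_ftrace_filter would invoke the callback
 * (see ftrace_process_regex() below for the parsing).
 */
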
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func = function_trace_probe_call,
};

static int ftrace_probe_registered;

static void __enable_ftrace_function_probe(void)
{
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	__register_ftrace_function(&trace_probe_ops);
	ftrace_startup(0);
	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	__unregister_ftrace_function(&trace_probe_ops);
	ftrace_shutdown(0);
	ftrace_probe_registered = 0;
}

static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}

int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}

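/*
 * Minimal usage sketch (hypothetical callback and names, shown for
 * illustration only): a caller supplies a struct ftrace_probe_ops
 * whose func runs whenever a matched function is hit:
 *
 *	static void my_probe_func(unsigned long ip,
 *				  unsigned long parent_ip, void **data)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	register_ftrace_function_probe("sched_*", &my_probe_ops, NULL);
 */
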
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				   void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") || !strlen(glob)))
		glob = NULL;
	else {
		int not;

		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					   PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}

static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static int ftrace_process_regex(char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ftrace_match_records(func, len, enable);
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

e309b41d 2065static ssize_t
41c52c0d
SR
2066ftrace_regex_write(struct file *file, const char __user *ubuf,
2067 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
2068{
2069 struct ftrace_iterator *iter;
2070 char ch;
2071 size_t read = 0;
2072 ssize_t ret;
2073
2074 if (!cnt || cnt < 0)
2075 return 0;
2076
41c52c0d 2077 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
2078
2079 if (file->f_mode & FMODE_READ) {
2080 struct seq_file *m = file->private_data;
2081 iter = m->private;
2082 } else
2083 iter = file->private_data;
2084
2085 if (!*ppos) {
2086 iter->flags &= ~FTRACE_ITER_CONT;
2087 iter->buffer_idx = 0;
2088 }
2089
2090 ret = get_user(ch, ubuf++);
2091 if (ret)
2092 goto out;
2093 read++;
2094 cnt--;
2095
2096 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
2097 /* skip white space */
2098 while (cnt && isspace(ch)) {
2099 ret = get_user(ch, ubuf++);
2100 if (ret)
2101 goto out;
2102 read++;
2103 cnt--;
2104 }
2105
5072c59f
SR
2106 if (isspace(ch)) {
2107 file->f_pos += read;
2108 ret = read;
2109 goto out;
2110 }
2111
2112 iter->buffer_idx = 0;
2113 }
2114
2115 while (cnt && !isspace(ch)) {
2116 if (iter->buffer_idx < FTRACE_BUFF_MAX)
2117 iter->buffer[iter->buffer_idx++] = ch;
2118 else {
2119 ret = -EINVAL;
2120 goto out;
2121 }
2122 ret = get_user(ch, ubuf++);
2123 if (ret)
2124 goto out;
2125 read++;
2126 cnt--;
2127 }
2128
2129 if (isspace(ch)) {
2130 iter->filtered++;
2131 iter->buffer[iter->buffer_idx] = 0;
64e7c440
SR
2132 ret = ftrace_process_regex(iter->buffer,
2133 iter->buffer_idx, enable);
2134 if (ret)
2135 goto out;
5072c59f
SR
2136 iter->buffer_idx = 0;
2137 } else
2138 iter->flags |= FTRACE_ITER_CONT;
2139
2140
2141 file->f_pos += read;
2142
2143 ret = read;
2144 out:
41c52c0d 2145 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
2146
2147 return ret;
2148}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
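
/*
 * Sketch of how a built-in tracer might drive the two calls above
 * (illustrative only, kept out of the build; the globs are arbitrary):
 */
#if 0
static void example_setup_filtering(void)
{
	static char filter[] = "schedule";
	static char notrace[] = "*console*";

	/* trace only schedule ... */
	ftrace_set_filter((unsigned char *)filter, strlen(filter), 1);
	/* ... and never trace anything matching *console* */
	ftrace_set_notrace((unsigned char *)notrace, strlen(notrace), 0);
}
#endif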

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	/* Nothing is filtered; tell g_show to print that all functions are enabled */
	if (!ftrace_graph_count && !*pos)
		return (void *)1;

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int found = 0;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode regex */
	type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
	if (not)
		return -EINVAL;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
			break;

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* ensure it is not already in the array */
			exists = false;
			for (i = 0; i < *idx; i++)
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			if (!exists) {
				array[(*idx)++] = rec->ip;
				found = 1;
			}
		}
	} while_for_each_ftrace_rec();

	mutex_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one expression at a time */
	ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
	if (ret)
		goto out;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
};
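
/*
 * Example use from user space (paths assume debugfs is mounted at
 * /sys/kernel/debug); one expression is accepted per write:
 *
 *	# echo schedule > /sys/kernel/debug/tracing/set_graph_function
 *	# cat /sys/kernel/debug/tracing/set_graph_function
 */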
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
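
/*
 * A quick tour of the files created above (illustrative; paths are
 * relative to /sys/kernel/debug/tracing with debugfs mounted):
 *
 *	# cat available_filter_functions      every traceable function
 *	# cat failures                        functions that failed to patch
 *	# echo 'sys_*' > set_ftrace_filter    trace only matching functions
 *	# echo schedule > set_ftrace_notrace  never trace schedule
 */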

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}
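
/*
 * Example use from user space (debugfs assumed at /sys/kernel/debug):
 * a positive value traces only that PID, 0 selects the per-cpu
 * swapper/idle tasks, and a negative value disables pid filtering:
 *
 *	# echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	# echo 0 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	# echo -1 > /sys/kernel/debug/tracing/set_ftrace_pid
 */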

static const struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply stop tracing
 * from a non-atomic section, unregister the ops with
 * unregister_ftrace_function() instead.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}
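
/*
 * Minimal registration sketch (hypothetical names, kept out of the
 * build): the callback must be notrace or it would recurse into itself.
 */
#if 0
static void notrace example_trace_func(unsigned long ip,
				       unsigned long parent_ip)
{
	/* called for every traced function; ip is the callee's address */
}

static struct ftrace_ops example_trace_ops = {
	.func = example_trace_func,
};

/*
 *	register_ftrace_function(&example_trace_ops);
 *	...
 *	unregister_ftrace_function(&example_trace_ops);
 */
#endif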

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
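
/*
 * This handler backs the kernel.ftrace_enabled sysctl, so function
 * tracing can be toggled from user space, e.g.:
 *
 *	# echo 0 > /proc/sys/kernel/ftrace_enabled
 *	# sysctl kernel.ftrace_enabled=1
 */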

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
				struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * If the user wants to count the time a function was asleep,
	 * do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (atomic_read(&ftrace_graph_active)) {
		ret = -EBUSY;
		goto out;
	}

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
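
/*
 * Registration sketch (hypothetical names, kept out of the build): the
 * entry callback returns non-zero when the function should be traced,
 * and the return callback fires as the traced function returns.
 */
#if 0
static int example_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* trace this function */
}

static void example_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime gives the raw duration */
}

/*	register_ftrace_graph(example_graph_return, example_graph_entry); */
#endif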

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	atomic_dec(&ftrace_graph_active);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
		t->ftrace_timestamp = 0;
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif
