tracing/events: move trace point headers into include/trace/events
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 71e5faef12ab912a7c5e7589348f04d0621ae4ce..a23488988581aab4b4638b9c7ca6f282aed6c7de 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -29,7 +29,7 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 
-#include <trace/sched.h>
+#include <trace/events/sched.h>
 
 #include <asm/ftrace.h>
 
@@ -69,7 +69,7 @@ static DEFINE_MUTEX(ftrace_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
-       .func = ftrace_stub,
+       .func           = ftrace_stub,
 };
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
@@ -271,8 +271,10 @@ struct ftrace_profile_stat {
 #define PROFILES_PER_PAGE                                      \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
 
-static int ftrace_profile_bits;
-static int ftrace_profile_enabled;
+static int ftrace_profile_bits __read_mostly;
+static int ftrace_profile_enabled __read_mostly;
+
+/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
 static DEFINE_MUTEX(ftrace_profile_lock);
 
 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
@@ -345,8 +347,10 @@ static int function_stat_cmp(void *p1, void *p2)
 static int function_stat_headers(struct seq_file *m)
 {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       seq_printf(m, "  Function                               Hit    Time\n"
-                     "  --------                               ---    ----\n");
+       seq_printf(m, "  Function                               "
+                  "Hit    Time            Avg\n"
+                     "  --------                               "
+                  "---    ----            ---\n");
 #else
        seq_printf(m, "  Function                               Hit\n"
                      "  --------                               ---\n");
@@ -359,12 +363,9 @@ static int function_stat_show(struct seq_file *m, void *v)
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       static struct trace_seq s;
        static DEFINE_MUTEX(mutex);
-
-       mutex_lock(&mutex);
-       trace_seq_init(&s);
-       trace_print_graph_duration(rec->time, &s);
+       static struct trace_seq s;
+       unsigned long long avg;
 #endif
 
        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
@@ -372,6 +373,14 @@ static int function_stat_show(struct seq_file *m, void *v)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "    ");
+       avg = rec->time;
+       do_div(avg, rec->counter);
+
+       mutex_lock(&mutex);
+       trace_seq_init(&s);
+       trace_print_graph_duration(rec->time, &s);
+       trace_seq_puts(&s, "    ");
+       trace_print_graph_duration(avg, &s);
        trace_print_seq(m, &s);
        mutex_unlock(&mutex);
 #endif
@@ -399,6 +408,8 @@ static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 {
        struct ftrace_profile_page *pg;
+       int functions;
+       int pages;
        int i;
 
        /* If we already allocated, do nothing */
@@ -409,22 +420,46 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
        if (!stat->pages)
                return -ENOMEM;
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+       functions = ftrace_update_tot_cnt;
+#else
+       /*
+        * We do not know the number of functions that exist because
+        * dynamic tracing is what counts them. With past experience
+        * we have around 20K functions. That should be more than enough.
+        * It is highly unlikely we will execute every function in
+        * the kernel.
+        */
+       functions = 20000;
+#endif
+
        pg = stat->start = stat->pages;
 
-       /* allocate 10 more pages to start */
-       for (i = 0; i < 10; i++) {
+       pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
+
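+       /* allocate enough pages to hold a profile record for every function */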
+       for (i = 0; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
-               /*
-                * We only care about allocating profile_pages, if
-                * we failed to allocate here, hopefully we will allocate
-                * later.
-                */
                if (!pg->next)
-                       break;
+                       goto out_free;
                pg = pg->next;
        }
 
        return 0;
+
+ out_free:
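+       /* an allocation failed: unwind and free the pages allocated so far */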
+       pg = stat->start;
+       while (pg) {
+               unsigned long tmp = (unsigned long)pg;
+
+               pg = pg->next;
+               free_page(tmp);
+       }
+
+       stat->pages = NULL;
+       stat->start = NULL;
+
+       return -ENOMEM;
 }
 
 static int ftrace_profile_init_cpu(int cpu)
@@ -458,7 +493,7 @@ static int ftrace_profile_init_cpu(int cpu)
                        ftrace_profile_bits++;
        }
 
-       /* Preallocate a few pages */
+       /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
@@ -514,24 +549,21 @@ static void ftrace_add_profile(struct ftrace_profile_stat *stat,
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
 }
 
-/* Interrupts must be disabled calling this */
+/*
+ * The memory is already allocated; this simply finds a new record to use.
+ */
 static struct ftrace_profile *
-ftrace_profile_alloc(struct ftrace_profile_stat *stat,
-                    unsigned long ip, bool alloc_safe)
+ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 {
        struct ftrace_profile *rec = NULL;
 
-       /* prevent recursion */
+       /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;
 
-       /* Try to always keep another page available */
-       if (!stat->pages->next && alloc_safe)
-               stat->pages->next = (void *)get_zeroed_page(GFP_ATOMIC);
-
        /*
-        * Try to find the function again since another
-        * task on another CPU could have added it
+        * Try to find the function again since an NMI
+        * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
@@ -553,29 +585,16 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat,
        return rec;
 }
 
-/*
- * If we are not in an interrupt, or softirq and
- * and interrupts are disabled and preemption is not enabled
- * (not in a spinlock) then it should be safe to allocate memory.
- */
-static bool ftrace_safe_to_allocate(void)
-{
-       return !in_interrupt() && irqs_disabled() && !preempt_count();
-}
-
 static void
 function_profile_call(unsigned long ip, unsigned long parent_ip)
 {
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;
-       bool alloc_safe;
 
        if (!ftrace_profile_enabled)
                return;
 
-       alloc_safe = ftrace_safe_to_allocate();
-
        local_irq_save(flags);
 
        stat = &__get_cpu_var(ftrace_profile_stats);
@@ -584,7 +603,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
 
        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
-               rec = ftrace_profile_alloc(stat, ip, alloc_safe);
+               rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }
@@ -651,7 +670,7 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly =
 {
-       .func = function_profile_call,
+       .func           = function_profile_call,
 };
 
 static int register_ftrace_profiler(void)
@@ -670,7 +689,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
 {
        unsigned long val;
-       char buf[64];
+       char buf[64];           /* big enough to hold a number */
        int ret;
 
        if (cnt >= sizeof(buf))
@@ -719,7 +738,7 @@ static ssize_t
 ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
 {
-       char buf[64];
+       char buf[64];           /* big enough to hold a number */
        int r;
 
        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
@@ -734,12 +753,12 @@ static const struct file_operations ftrace_profile_fops = {
 
 /* used to initialize the real stat files */
 static struct tracer_stat function_stats __initdata = {
-       .name = "functions",
-       .stat_start = function_stat_start,
-       .stat_next = function_stat_next,
-       .stat_cmp = function_stat_cmp,
-       .stat_headers = function_stat_headers,
-       .stat_show = function_stat_show
+       .name           = "functions",
+       .stat_start     = function_stat_start,
+       .stat_next      = function_stat_next,
+       .stat_cmp       = function_stat_cmp,
+       .stat_headers   = function_stat_headers,
+       .stat_show      = function_stat_show
 };
 
 static void ftrace_profile_debugfs(struct dentry *d_tracer)
@@ -909,9 +928,14 @@ void ftrace_release(void *start, unsigned long size)
 
        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {
-               if ((rec->ip >= s) && (rec->ip < e) &&
-                   !(rec->flags & FTRACE_FL_FREE))
+               if ((rec->ip >= s) && (rec->ip < e)) {
+                       /*
+                        * rec->ip is changed in ftrace_free_rec()
+                        * It should not be between s and e if the record was freed.
+                        */
+                       FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
                        ftrace_free_rec(rec);
+               }
        } while_for_each_ftrace_rec();
        mutex_unlock(&ftrace_lock);
 }
@@ -1954,7 +1978,7 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
-       .func = function_trace_probe_call,
+       .func           = function_trace_probe_call,
 };
 
 static int ftrace_probe_registered;
@@ -2674,38 +2698,23 @@ static const struct file_operations ftrace_graph_fops = {
 
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-       struct dentry *entry;
 
-       entry = debugfs_create_file("available_filter_functions", 0444,
-                                   d_tracer, NULL, &ftrace_avail_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'available_filter_functions' entry\n");
+       trace_create_file("available_filter_functions", 0444,
+                       d_tracer, NULL, &ftrace_avail_fops);
 
-       entry = debugfs_create_file("failures", 0444,
-                                   d_tracer, NULL, &ftrace_failures_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs 'failures' entry\n");
+       trace_create_file("failures", 0444,
+                       d_tracer, NULL, &ftrace_failures_fops);
 
-       entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
-                                   NULL, &ftrace_filter_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_filter' entry\n");
+       trace_create_file("set_ftrace_filter", 0644, d_tracer,
+                       NULL, &ftrace_filter_fops);
 
-       entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
+       trace_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_notrace' entry\n");
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
+       trace_create_file("set_graph_function", 0444, d_tracer,
                                    NULL,
                                    &ftrace_graph_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_graph_function' entry\n");
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
        return 0;
@@ -2963,7 +2972,6 @@ static const struct file_operations ftrace_pid_fops = {
 static __init int ftrace_init_debugfs(void)
 {
        struct dentry *d_tracer;
-       struct dentry *entry;
 
        d_tracer = tracing_init_dentry();
        if (!d_tracer)
@@ -2971,11 +2979,8 @@ static __init int ftrace_init_debugfs(void)
 
        ftrace_init_dyn_debugfs(d_tracer);
 
-       entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
-                                   NULL, &ftrace_pid_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_pid' entry\n");
+       trace_create_file("set_ftrace_pid", 0644, d_tracer,
+                           NULL, &ftrace_pid_fops);
 
        ftrace_profile_debugfs(d_tracer);
 
@@ -3087,7 +3092,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_graph_active;
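+/* ftrace_graph_active is modified only while holding ftrace_lock */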
+static int ftrace_graph_active;
 static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -3239,7 +3244,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        mutex_lock(&ftrace_lock);
 
        /* we currently allow only one tracer registered at a time */
-       if (atomic_read(&ftrace_graph_active)) {
+       if (ftrace_graph_active) {
                ret = -EBUSY;
                goto out;
        }
@@ -3247,10 +3252,10 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
        register_pm_notifier(&ftrace_suspend_notifier);
 
-       atomic_inc(&ftrace_graph_active);
+       ftrace_graph_active++;
        ret = start_graph_tracing();
        if (ret) {
-               atomic_dec(&ftrace_graph_active);
+               ftrace_graph_active--;
                goto out;
        }
 
@@ -3268,20 +3273,24 @@ void unregister_ftrace_graph(void)
 {
        mutex_lock(&ftrace_lock);
 
-       atomic_dec(&ftrace_graph_active);
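+       /* nothing to do if the graph tracer is not currently registered */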
+       if (unlikely(!ftrace_graph_active))
+               goto out;
+
+       ftrace_graph_active--;
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
        ftrace_shutdown(FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);
 
+ out:
        mutex_unlock(&ftrace_lock);
 }
 
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-       if (atomic_read(&ftrace_graph_active)) {
+       if (ftrace_graph_active) {
                t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                * sizeof(struct ftrace_ret_stack),
                                GFP_KERNEL);