tracing/events: move trace point headers into include/trace/events
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 24dac448cdc95974670466725e56b558baa1376a..a23488988581aab4b4638b9c7ca6f282aed6c7de 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
 #include <linux/list.h>
 #include <linux/hash.h>
 
-#include <trace/sched.h>
+#include <trace/events/sched.h>
 
 #include <asm/ftrace.h>
 
-#include "trace.h"
+#include "trace_output.h"
 #include "trace_stat.h"
 
 #define FTRACE_WARN_ON(cond)                   \
@@ -69,7 +69,7 @@ static DEFINE_MUTEX(ftrace_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
-       .func = ftrace_stub,
+       .func           = ftrace_stub,
 };
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
@@ -246,6 +246,9 @@ struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       unsigned long long              time;
+#endif
 };
 
 struct ftrace_profile_page {
@@ -254,28 +257,30 @@ struct ftrace_profile_page {
        struct ftrace_profile           records[];
 };
 
+struct ftrace_profile_stat {
+       atomic_t                        disabled;
+       struct hlist_head               *hash;
+       struct ftrace_profile_page      *pages;
+       struct ftrace_profile_page      *start;
+       struct tracer_stat              stat;
+};
+
 #define PROFILE_RECORDS_SIZE                                           \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
 
 #define PROFILES_PER_PAGE                                      \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
 
-/* TODO: make these percpu, to prevent cache line bouncing */
-static struct ftrace_profile_page *profile_pages_start;
-static struct ftrace_profile_page *profile_pages;
+static int ftrace_profile_bits __read_mostly;
+static int ftrace_profile_enabled __read_mostly;
 
-static struct hlist_head *ftrace_profile_hash;
-static int ftrace_profile_bits;
-static int ftrace_profile_enabled;
+/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
 static DEFINE_MUTEX(ftrace_profile_lock);
 
-static DEFINE_PER_CPU(atomic_t, ftrace_profile_disable);
+static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
 
 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
 
-static raw_spinlock_t ftrace_profile_rec_lock =
-       (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-
 static void *
 function_stat_next(void *v, int idx)
 {
@@ -300,9 +305,31 @@ function_stat_next(void *v, int idx)
 
 static void *function_stat_start(struct tracer_stat *trace)
 {
-       return function_stat_next(&profile_pages_start->records[0], 0);
+       struct ftrace_profile_stat *stat =
+               container_of(trace, struct ftrace_profile_stat, stat);
+
+       if (!stat || !stat->start)
+               return NULL;
+
+       return function_stat_next(&stat->start->records[0], 0);
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* with the function graph tracer, compare on total time */
+static int function_stat_cmp(void *p1, void *p2)
+{
+       struct ftrace_profile *a = p1;
+       struct ftrace_profile *b = p2;
+
+       if (a->time < b->time)
+               return -1;
+       if (a->time > b->time)
+               return 1;
+       else
+               return 0;
+}
+#else
+/* without the function graph tracer, compare against hits */
 static int function_stat_cmp(void *p1, void *p2)
 {
        struct ftrace_profile *a = p1;
@@ -315,11 +342,19 @@ static int function_stat_cmp(void *p1, void *p2)
        else
                return 0;
 }
+#endif
 
 static int function_stat_headers(struct seq_file *m)
 {
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       seq_printf(m, "  Function                               "
+                  "Hit    Time            Avg\n"
+                     "  --------                               "
+                  "---    ----            ---\n");
+#else
        seq_printf(m, "  Function                               Hit\n"
                      "  --------                               ---\n");
+#endif
        return 0;
 }
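
The stat_cmp callbacks above decide how the tracer_stat framework orders records in the per-cpu profile files: by accumulated time when the function graph tracer is in use, otherwise by hit count. A minimal userspace sketch of the time-based ordering, using a simplified record layout and qsort() in place of the kernel's stat machinery (only the field names mirror struct ftrace_profile):

/*
 * Sort a handful of fake profile records by accumulated time, the way
 * the CONFIG_FUNCTION_GRAPH_TRACER comparator above orders entries.
 * Everything here is illustrative; it is not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_profile {
	unsigned long ip;
	unsigned long counter;
	unsigned long long time;
};

static int cmp_by_time(const void *p1, const void *p2)
{
	const struct fake_profile *a = p1;
	const struct fake_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	return 0;
}

int main(void)
{
	struct fake_profile recs[] = {
		{ 0xc0100000UL, 10, 500 },
		{ 0xc0200000UL,  3, 900 },
		{ 0xc0300000UL, 42, 120 },
	};

	qsort(recs, 3, sizeof(recs[0]), cmp_by_time);

	for (int i = 0; i < 3; i++)
		printf("%#lx hit=%lu time=%llu\n",
		       recs[i].ip, recs[i].counter, recs[i].time);
	return 0;
}
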
 
@@ -327,27 +362,38 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       static DEFINE_MUTEX(mutex);
+       static struct trace_seq s;
+       unsigned long long avg;
+#endif
 
        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+       seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       seq_printf(m, "    ");
+       avg = rec->time;
+       do_div(avg, rec->counter);
+
+       mutex_lock(&mutex);
+       trace_seq_init(&s);
+       trace_print_graph_duration(rec->time, &s);
+       trace_seq_puts(&s, "    ");
+       trace_print_graph_duration(avg, &s);
+       trace_print_seq(m, &s);
+       mutex_unlock(&mutex);
+#endif
+       seq_putc(m, '\n');
 
-       seq_printf(m, "  %-30.30s  %10lu\n", str, rec->counter);
        return 0;
 }
 
-static struct tracer_stat function_stats = {
-       .name = "functions",
-       .stat_start = function_stat_start,
-       .stat_next = function_stat_next,
-       .stat_cmp = function_stat_cmp,
-       .stat_headers = function_stat_headers,
-       .stat_show = function_stat_show
-};
-
-static void ftrace_profile_reset(void)
+static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
 {
        struct ftrace_profile_page *pg;
 
-       pg = profile_pages = profile_pages_start;
+       pg = stat->pages = stat->start;
 
        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
@@ -355,48 +401,77 @@ static void ftrace_profile_reset(void)
                pg = pg->next;
        }
 
-       memset(ftrace_profile_hash, 0,
+       memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
 }
 
-int ftrace_profile_pages_init(void)
+int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 {
        struct ftrace_profile_page *pg;
+       int functions;
+       int pages;
        int i;
 
        /* If we already allocated, do nothing */
-       if (profile_pages)
+       if (stat->pages)
                return 0;
 
-       profile_pages = (void *)get_zeroed_page(GFP_KERNEL);
-       if (!profile_pages)
+       stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!stat->pages)
                return -ENOMEM;
 
-       pg = profile_pages_start = profile_pages;
+#ifdef CONFIG_DYNAMIC_FTRACE
+       functions = ftrace_update_tot_cnt;
+#else
+       /*
+        * We do not know the number of functions that exist because
+        * dynamic tracing is what counts them. From past experience,
+        * the kernel has around 20K functions. That should be more than enough.
+        * It is highly unlikely we will execute every function in
+        * the kernel.
+        */
+       functions = 20000;
+#endif
+
+       pg = stat->start = stat->pages;
+
+       pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
 
-       /* allocate 10 more pages to start */
-       for (i = 0; i < 10; i++) {
+       for (i = 0; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
-               /*
-                * We only care about allocating profile_pages, if
-                * we failed to allocate here, hopefully we will allocate
-                * later.
-                */
                if (!pg->next)
-                       break;
+                       goto out_free;
                pg = pg->next;
        }
 
        return 0;
+
+ out_free:
+       pg = stat->start;
+       while (pg) {
+               unsigned long tmp = (unsigned long)pg;
+
+               pg = pg->next;
+               free_page(tmp);
+       }
+
+       free_page((unsigned long)stat->pages);
+       stat->pages = NULL;
+       stat->start = NULL;
+
+       return -ENOMEM;
 }
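
To put numbers on the sizing above: with the ~20000-function estimate and PROFILES_PER_PAGE derived from PAGE_SIZE, the profiler ends up preallocating a couple of hundred pages per CPU. A back-of-the-envelope sketch, where the 4 KiB page and the record/header sizes are assumptions rather than values taken from a real build:

/*
 * Rough model of the arithmetic in ftrace_profile_pages_init().
 * RECORD_SIZE and PAGE_HEADER only approximate the real structures.
 */
#include <stdio.h>

#define PAGE_SIZE_B	4096UL
#define PAGE_HEADER	24UL	/* assumed: index + next pointer + padding */
#define RECORD_SIZE	40UL	/* assumed: hlist_node + ip + counter + time */

int main(void)
{
	unsigned long per_page = (PAGE_SIZE_B - PAGE_HEADER) / RECORD_SIZE;
	unsigned long functions = 20000;	/* estimate used by the patch */
	unsigned long pages = (functions + per_page - 1) / per_page;

	printf("%lu records per page -> %lu pages for %lu functions\n",
	       per_page, pages, functions);
	return 0;
}
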
 
-static int ftrace_profile_init(void)
+static int ftrace_profile_init_cpu(int cpu)
 {
+       struct ftrace_profile_stat *stat;
        int size;
 
-       if (ftrace_profile_hash) {
+       stat = &per_cpu(ftrace_profile_stats, cpu);
+
+       if (stat->hash) {
                /* If the profile is already created, simply reset it */
-               ftrace_profile_reset();
+               ftrace_profile_reset(stat);
                return 0;
        }
 
@@ -406,29 +481,45 @@ static int ftrace_profile_init(void)
         */
        size = FTRACE_PROFILE_HASH_SIZE;
 
-       ftrace_profile_hash =
-               kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
+       stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
 
-       if (!ftrace_profile_hash)
+       if (!stat->hash)
                return -ENOMEM;
 
-       size--;
+       if (!ftrace_profile_bits) {
+               size--;
 
-       for (; size; size >>= 1)
-               ftrace_profile_bits++;
+               for (; size; size >>= 1)
+                       ftrace_profile_bits++;
+       }
 
-       /* Preallocate a few pages */
-       if (ftrace_profile_pages_init() < 0) {
-               kfree(ftrace_profile_hash);
-               ftrace_profile_hash = NULL;
+       /* Preallocate the function profiling pages */
+       if (ftrace_profile_pages_init(stat) < 0) {
+               kfree(stat->hash);
+               stat->hash = NULL;
                return -ENOMEM;
        }
 
        return 0;
 }
 
+static int ftrace_profile_init(void)
+{
+       int cpu;
+       int ret = 0;
+
+       for_each_online_cpu(cpu) {
+               ret = ftrace_profile_init_cpu(cpu);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
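
The bit-count loop in ftrace_profile_init_cpu() runs only once: FTRACE_PROFILE_HASH_SIZE of 1024 yields ftrace_profile_bits = 10, which hash_long() then uses in ftrace_find_profiled_func() and ftrace_add_profile(). A standalone check of that arithmetic:

/* Standalone check of the "size-- then shift" bit counting used above. */
#include <stdio.h>

int main(void)
{
	int size = 1024;	/* FTRACE_PROFILE_HASH_SIZE */
	int bits = 0;

	size--;			/* 1023 == 0x3ff */
	for (; size; size >>= 1)
		bits++;

	printf("hash bits = %d\n", bits);	/* prints 10 */
	return 0;
}
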
+
 /* interrupts must be disabled */
-static struct ftrace_profile *ftrace_find_profiled_func(unsigned long ip)
+static struct ftrace_profile *
+ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 {
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
@@ -436,7 +527,7 @@ static struct ftrace_profile *ftrace_find_profiled_func(unsigned long ip)
        unsigned long key;
 
        key = hash_long(ip, ftrace_profile_bits);
-       hhd = &ftrace_profile_hash[key];
+       hhd = &stat->hash[key];
 
        if (hlist_empty(hhd))
                return NULL;
@@ -449,82 +540,70 @@ static struct ftrace_profile *ftrace_find_profiled_func(unsigned long ip)
        return NULL;
 }
 
-static void ftrace_add_profile(struct ftrace_profile *rec)
+static void ftrace_add_profile(struct ftrace_profile_stat *stat,
+                              struct ftrace_profile *rec)
 {
        unsigned long key;
 
        key = hash_long(rec->ip, ftrace_profile_bits);
-       hlist_add_head_rcu(&rec->node, &ftrace_profile_hash[key]);
+       hlist_add_head_rcu(&rec->node, &stat->hash[key]);
 }
 
-/* Interrupts must be disabled calling this */
+/*
+ * The memory is already allocated; this simply finds a new record to use.
+ */
 static struct ftrace_profile *
-ftrace_profile_alloc(unsigned long ip, bool alloc_safe)
+ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 {
        struct ftrace_profile *rec = NULL;
 
-       /* prevent recursion */
-       if (atomic_inc_return(&__get_cpu_var(ftrace_profile_disable)) != 1)
+       /* prevent recursion (from NMIs) */
+       if (atomic_inc_return(&stat->disabled) != 1)
                goto out;
 
-       __raw_spin_lock(&ftrace_profile_rec_lock);
-
-       /* Try to always keep another page available */
-       if (!profile_pages->next && alloc_safe)
-               profile_pages->next = (void *)get_zeroed_page(GFP_ATOMIC);
-
        /*
-        * Try to find the function again since another
-        * task on another CPU could have added it
+        * Try to find the function again since an NMI
+        * could have added it
         */
-       rec = ftrace_find_profiled_func(ip);
+       rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
-               goto out_unlock;
+               goto out;
 
-       if (profile_pages->index == PROFILES_PER_PAGE) {
-               if (!profile_pages->next)
-                       goto out_unlock;
-               profile_pages = profile_pages->next;
+       if (stat->pages->index == PROFILES_PER_PAGE) {
+               if (!stat->pages->next)
+                       goto out;
+               stat->pages = stat->pages->next;
        }
 
-       rec = &profile_pages->records[profile_pages->index++];
+       rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
-       ftrace_add_profile(rec);
+       ftrace_add_profile(stat, rec);
 
- out_unlock:
-       __raw_spin_unlock(&ftrace_profile_rec_lock);
  out:
-       atomic_dec(&__get_cpu_var(ftrace_profile_disable));
+       atomic_dec(&stat->disabled);
 
        return rec;
 }
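
The per-cpu disabled counter above is what keeps an NMI from re-entering ftrace_profile_alloc() on the same CPU: only the context that takes the count from 0 to 1 proceeds, and every context decrements again on its way out. A sketch of the same pattern in C11 atomics (illustrative only; the kernel uses its own atomic_t helpers):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int disabled;

/* Only the caller that moves the count from 0 to 1 is allowed in. */
static int try_enter(void)
{
	return atomic_fetch_add(&disabled, 1) == 0;
}

/* Every caller drops its count, as the kernel does at the "out:" label. */
static void leave(void)
{
	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	int first = try_enter();	/* 1: first entry proceeds */
	int nested = try_enter();	/* 0: an NMI-like re-entry is refused */

	printf("first=%d nested=%d\n", first, nested);

	leave();
	leave();
	return 0;
}
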
 
-/*
- * If we are not in an interrupt, or softirq and
- * and interrupts are disabled and preemption is not enabled
- * (not in a spinlock) then it should be safe to allocate memory.
- */
-static bool ftrace_safe_to_allocate(void)
-{
-       return !in_interrupt() && irqs_disabled() && !preempt_count();
-}
-
 static void
 function_profile_call(unsigned long ip, unsigned long parent_ip)
 {
+       struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;
-       bool alloc_safe;
 
        if (!ftrace_profile_enabled)
                return;
 
-       alloc_safe = ftrace_safe_to_allocate();
-
        local_irq_save(flags);
-       rec = ftrace_find_profiled_func(ip);
+
+       stat = &__get_cpu_var(ftrace_profile_stats);
+       if (!stat->hash)
+               goto out;
+
+       rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
-               rec = ftrace_profile_alloc(ip, alloc_safe);
+               rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }
@@ -534,17 +613,83 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
        local_irq_restore(flags);
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int profile_graph_entry(struct ftrace_graph_ent *trace)
+{
+       function_profile_call(trace->func, 0);
+       return 1;
+}
+
+static void profile_graph_return(struct ftrace_graph_ret *trace)
+{
+       struct ftrace_profile_stat *stat;
+       unsigned long long calltime;
+       struct ftrace_profile *rec;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       stat = &__get_cpu_var(ftrace_profile_stats);
+       if (!stat->hash)
+               goto out;
+
+       calltime = trace->rettime - trace->calltime;
+
+       if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
+               int index;
+
+               index = trace->depth;
+
+               /* Append this call time to the parent time to subtract */
+               if (index)
+                       current->ret_stack[index - 1].subtime += calltime;
+
+               if (current->ret_stack[index].subtime < calltime)
+                       calltime -= current->ret_stack[index].subtime;
+               else
+                       calltime = 0;
+       }
+
+       rec = ftrace_find_profiled_func(stat, trace->func);
+       if (rec)
+               rec->time += calltime;
+
+ out:
+       local_irq_restore(flags);
+}
+
+static int register_ftrace_profiler(void)
+{
+       return register_ftrace_graph(&profile_graph_return,
+                                    &profile_graph_entry);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+       unregister_ftrace_graph();
+}
+#else
 static struct ftrace_ops ftrace_profile_ops __read_mostly =
 {
-       .func = function_profile_call,
+       .func           = function_profile_call,
 };
 
+static int register_ftrace_profiler(void)
+{
+       return register_ftrace_function(&ftrace_profile_ops);
+}
+
+static void unregister_ftrace_profiler(void)
+{
+       unregister_ftrace_function(&ftrace_profile_ops);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
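
When the graph-time trace option is clear, profile_graph_return() charges a function only for its own execution time: each returning child adds its total to the parent's subtime, and the parent subtracts that before accumulating into rec->time. A simplified userspace model of that bookkeeping (the two-level call stack and struct below are illustrative, not the kernel's ret_stack):

/*
 * Model of the subtime accounting: a parent is charged only for time
 * not already attributed to its children.
 */
#include <stdio.h>

struct ret_entry {
	unsigned long long calltime;
	unsigned long long rettime;
	unsigned long long subtime;	/* time consumed by children */
};

static unsigned long long self_time(struct ret_entry *stack, int depth)
{
	unsigned long long calltime = stack[depth].rettime - stack[depth].calltime;

	/* hand our total time to the parent so it can subtract it later */
	if (depth)
		stack[depth - 1].subtime += calltime;

	/* charge ourselves only for time not spent in our children */
	if (stack[depth].subtime < calltime)
		calltime -= stack[depth].subtime;
	else
		calltime = 0;

	return calltime;
}

int main(void)
{
	/* parent runs 0..100, its only child runs 20..80 */
	struct ret_entry stack[2] = {
		{ .calltime = 0,  .rettime = 100, .subtime = 0 },
		{ .calltime = 20, .rettime = 80,  .subtime = 0 },
	};

	/* returns are processed child-first, as on the real return stack */
	printf("child self time:  %llu\n", self_time(stack, 1));	/* 60 */
	printf("parent self time: %llu\n", self_time(stack, 0));	/* 40 */
	return 0;
}
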
 static ssize_t
 ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
 {
        unsigned long val;
-       char buf[64];
+       char buf[64];           /* big enough to hold a number */
        int ret;
 
        if (cnt >= sizeof(buf))
@@ -570,11 +715,15 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
                                goto out;
                        }
 
-                       register_ftrace_function(&ftrace_profile_ops);
+                       ret = register_ftrace_profiler();
+                       if (ret < 0) {
+                               cnt = ret;
+                               goto out;
+                       }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
-                       unregister_ftrace_function(&ftrace_profile_ops);
+                       unregister_ftrace_profiler();
                }
        }
  out:
@@ -589,7 +738,7 @@ static ssize_t
 ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
 {
-       char buf[64];
+       char buf[64];           /* big enough to hold a number */
        int r;
 
        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
@@ -602,16 +751,51 @@ static const struct file_operations ftrace_profile_fops = {
        .write          = ftrace_profile_write,
 };
 
+/* used to initialize the real stat files */
+static struct tracer_stat function_stats __initdata = {
+       .name           = "functions",
+       .stat_start     = function_stat_start,
+       .stat_next      = function_stat_next,
+       .stat_cmp       = function_stat_cmp,
+       .stat_headers   = function_stat_headers,
+       .stat_show      = function_stat_show
+};
+
 static void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
+       struct ftrace_profile_stat *stat;
        struct dentry *entry;
+       char *name;
        int ret;
+       int cpu;
 
-       ret = register_stat_tracer(&function_stats);
-       if (ret) {
-               pr_warning("Warning: could not register "
-                          "function stats\n");
-               return;
+       for_each_possible_cpu(cpu) {
+               stat = &per_cpu(ftrace_profile_stats, cpu);
+
+               /* allocate enough for function name + cpu number */
+               name = kmalloc(32, GFP_KERNEL);
+               if (!name) {
+                       /*
+                        * The files created are permanent; if something fails
+                        * here, we still do not free the memory.
+                        */
+                       kfree(stat);
+                       WARN(1,
+                            "Could not allocate stat file for cpu %d\n",
+                            cpu);
+                       return;
+               }
+               stat->stat = function_stats;
+               snprintf(name, 32, "function%d", cpu);
+               stat->stat.name = name;
+               ret = register_stat_tracer(&stat->stat);
+               if (ret) {
+                       WARN(1,
+                            "Could not register function stat for cpu %d\n",
+                            cpu);
+                       kfree(name);
+                       return;
+               }
        }
 
        entry = debugfs_create_file("function_profile_enabled", 0644,
@@ -744,9 +928,14 @@ void ftrace_release(void *start, unsigned long size)
 
        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {
-               if ((rec->ip >= s) && (rec->ip < e) &&
-                   !(rec->flags & FTRACE_FL_FREE))
+               if ((rec->ip >= s) && (rec->ip < e)) {
+                       /*
+                        * rec->ip is changed in ftrace_free_rec();
+                        * it should not be between s and e if the record was freed.
+                        */
+                       FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
                        ftrace_free_rec(rec);
+               }
        } while_for_each_ftrace_rec();
        mutex_unlock(&ftrace_lock);
 }
@@ -1789,7 +1978,7 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
-       .func = function_trace_probe_call,
+       .func           = function_trace_probe_call,
 };
 
 static int ftrace_probe_registered;
@@ -2509,38 +2698,23 @@ static const struct file_operations ftrace_graph_fops = {
 
 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-       struct dentry *entry;
 
-       entry = debugfs_create_file("available_filter_functions", 0444,
-                                   d_tracer, NULL, &ftrace_avail_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'available_filter_functions' entry\n");
+       trace_create_file("available_filter_functions", 0444,
+                       d_tracer, NULL, &ftrace_avail_fops);
 
-       entry = debugfs_create_file("failures", 0444,
-                                   d_tracer, NULL, &ftrace_failures_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs 'failures' entry\n");
+       trace_create_file("failures", 0444,
+                       d_tracer, NULL, &ftrace_failures_fops);
 
-       entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
-                                   NULL, &ftrace_filter_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_filter' entry\n");
+       trace_create_file("set_ftrace_filter", 0644, d_tracer,
+                       NULL, &ftrace_filter_fops);
 
-       entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
+       trace_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_notrace' entry\n");
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
+       trace_create_file("set_graph_function", 0444, d_tracer,
                                    NULL,
                                    &ftrace_graph_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_graph_function' entry\n");
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
        return 0;
@@ -2798,7 +2972,6 @@ static const struct file_operations ftrace_pid_fops = {
 static __init int ftrace_init_debugfs(void)
 {
        struct dentry *d_tracer;
-       struct dentry *entry;
 
        d_tracer = tracing_init_dentry();
        if (!d_tracer)
@@ -2806,11 +2979,8 @@ static __init int ftrace_init_debugfs(void)
 
        ftrace_init_dyn_debugfs(d_tracer);
 
-       entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
-                                   NULL, &ftrace_pid_fops);
-       if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'set_ftrace_pid' entry\n");
+       trace_create_file("set_ftrace_pid", 0644, d_tracer,
+                           NULL, &ftrace_pid_fops);
 
        ftrace_profile_debugfs(d_tracer);
 
@@ -2922,7 +3092,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_graph_active;
+static int ftrace_graph_active;
 static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -3074,7 +3244,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        mutex_lock(&ftrace_lock);
 
        /* we currently allow only one tracer registered at a time */
-       if (atomic_read(&ftrace_graph_active)) {
+       if (ftrace_graph_active) {
                ret = -EBUSY;
                goto out;
        }
@@ -3082,10 +3252,10 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
        register_pm_notifier(&ftrace_suspend_notifier);
 
-       atomic_inc(&ftrace_graph_active);
+       ftrace_graph_active++;
        ret = start_graph_tracing();
        if (ret) {
-               atomic_dec(&ftrace_graph_active);
+               ftrace_graph_active--;
                goto out;
        }
 
@@ -3103,20 +3273,24 @@ void unregister_ftrace_graph(void)
 {
        mutex_lock(&ftrace_lock);
 
-       atomic_dec(&ftrace_graph_active);
+       if (unlikely(!ftrace_graph_active))
+               goto out;
+
+       ftrace_graph_active--;
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
        ftrace_shutdown(FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);
 
+ out:
        mutex_unlock(&ftrace_lock);
 }
 
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-       if (atomic_read(&ftrace_graph_active)) {
+       if (ftrace_graph_active) {
                t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                * sizeof(struct ftrace_ret_stack),
                                GFP_KERNEL);