#include <linux/list.h>
#include <linux/hash.h>
-#include <trace/sched.h>
+#include <trace/events/sched.h>
#include <asm/ftrace.h>
static struct ftrace_ops ftrace_list_end __read_mostly =
{
- .func = ftrace_stub,
+ .func = ftrace_stub,
};
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
#define PROFILES_PER_PAGE \
(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
-static int ftrace_profile_bits;
-static int ftrace_profile_enabled;
+static int ftrace_profile_bits __read_mostly;
+static int ftrace_profile_enabled __read_mostly;
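+/* both are read in the function profiling hot path but rarely written */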
+
+/* ftrace_profile_lock - serializes enabling and disabling of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);
static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- seq_printf(m, " Function Hit Time\n"
- " -------- --- ----\n");
+ seq_printf(m, " Function "
+ "Hit Time Avg\n"
+ " -------- "
+ "--- ---- ---\n");
#else
seq_printf(m, " Function Hit\n"
" -------- ---\n");
struct ftrace_profile *rec = v;
char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- static struct trace_seq s;
static DEFINE_MUTEX(mutex);
-
- mutex_lock(&mutex);
- trace_seq_init(&s);
- trace_print_graph_duration(rec->time, &s);
+ static struct trace_seq s;
+ unsigned long long avg;
#endif
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
seq_printf(m, " ");
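+ /* do_div() divides in place: avg becomes the mean time per hit */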
+ avg = rec->time;
+ do_div(avg, rec->counter);
+
+ mutex_lock(&mutex);
+ trace_seq_init(&s);
+ trace_print_graph_duration(rec->time, &s);
+ trace_seq_puts(&s, " ");
+ trace_print_graph_duration(avg, &s);
trace_print_seq(m, &s);
mutex_unlock(&mutex);
#endif
int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
struct ftrace_profile_page *pg;
+ int functions;
+ int pages;
int i;
/* If we already allocated, do nothing */
if (stat->pages)
return 0;

stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
if (!stat->pages)
return -ENOMEM;
+#ifdef CONFIG_DYNAMIC_FTRACE
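+ /* dynamic ftrace has already counted every traceable function */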
+ functions = ftrace_update_tot_cnt;
+#else
+ /*
+ * We do not know the number of functions that exist because
+ * only dynamic tracing counts them. From past experience
+ * there are around 20K functions, which should be more than
+ * enough; it is highly unlikely we will execute every function
+ * in the kernel.
+ */
+ functions = 20000;
+#endif
+
pg = stat->start = stat->pages;
- /* allocate 10 more pages to start */
- for (i = 0; i < 10; i++) {
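+ /* enough pages to hold one profile record for every function we may trace */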
+ pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
+
+ for (i = 0; i < pages; i++) {
pg->next = (void *)get_zeroed_page(GFP_KERNEL);
- /*
- * We only care about allocating profile_pages, if
- * we failed to allocate here, hopefully we will allocate
- * later.
- */
if (!pg->next)
- break;
+ goto out_free;
pg = pg->next;
}
return 0;
+
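+ /* A page allocation failed: unwind everything allocated so far */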
+ out_free:
+ pg = stat->start;
+ while (pg) {
+ unsigned long tmp = (unsigned long)pg;
+
+ pg = pg->next;
+ free_page(tmp);
+ }
+
+ stat->pages = NULL;
+ stat->start = NULL;
+
+ return -ENOMEM;
}
static int ftrace_profile_init_cpu(int cpu)
ftrace_profile_bits++;
}
- /* Preallocate a few pages */
+ /* Preallocate the function profiling pages */
if (ftrace_profile_pages_init(stat) < 0) {
kfree(stat->hash);
stat->hash = NULL;
hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}
-/* Interrupts must be disabled calling this */
+/*
+ * The memory is already allocated; this simply finds a new record to use.
+ */
static struct ftrace_profile *
-ftrace_profile_alloc(struct ftrace_profile_stat *stat,
- unsigned long ip, bool alloc_safe)
+ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
struct ftrace_profile *rec = NULL;
- /* prevent recursion */
+ /* prevent recursion (from NMIs) */
if (atomic_inc_return(&stat->disabled) != 1)
goto out;
- /* Try to always keep another page available */
- if (!stat->pages->next && alloc_safe)
- stat->pages->next = (void *)get_zeroed_page(GFP_ATOMIC);
-
/*
- * Try to find the function again since another
- * task on another CPU could have added it
+ * Try to find the function again since an NMI
+ * could have added it
*/
rec = ftrace_find_profiled_func(stat, ip);
if (rec)
goto out;
}
-/*
- * If we are not in an interrupt, or softirq and
- * and interrupts are disabled and preemption is not enabled
- * (not in a spinlock) then it should be safe to allocate memory.
- */
-static bool ftrace_safe_to_allocate(void)
-{
- return !in_interrupt() && irqs_disabled() && !preempt_count();
-}
-
static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
struct ftrace_profile_stat *stat;
struct ftrace_profile *rec;
unsigned long flags;
- bool alloc_safe;
if (!ftrace_profile_enabled)
return;
- alloc_safe = ftrace_safe_to_allocate();
-
local_irq_save(flags);
stat = &__get_cpu_var(ftrace_profile_stats);
rec = ftrace_find_profiled_func(stat, ip);
if (!rec) {
- rec = ftrace_profile_alloc(stat, ip, alloc_safe);
+ rec = ftrace_profile_alloc(stat, ip);
if (!rec)
goto out;
}
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
struct ftrace_profile_stat *stat;
+ unsigned long long calltime;
struct ftrace_profile *rec;
unsigned long flags;
if (!stat->hash)
goto out;
+ calltime = trace->rettime - trace->calltime;
+
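+ /*
+ * Without the "graph time" option, report only the time spent in the
+ * function itself: credit this call's total time to the parent frame's
+ * subtime (so the parent can subtract it), then subtract the children's
+ * accumulated subtime from this call's time.
+ */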
+ if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
+ int index;
+
+ index = trace->depth;
+
+ /* Append this call time to the parent time to subtract */
+ if (index)
+ current->ret_stack[index - 1].subtime += calltime;
+
+ if (current->ret_stack[index].subtime < calltime)
+ calltime -= current->ret_stack[index].subtime;
+ else
+ calltime = 0;
+ }
+
rec = ftrace_find_profiled_func(stat, trace->func);
if (rec)
- rec->time += trace->rettime - trace->calltime;
+ rec->time += calltime;
+
out:
local_irq_restore(flags);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
- .func = function_profile_call,
+ .func = function_profile_call,
};
static int register_ftrace_profiler(void)
size_t cnt, loff_t *ppos)
{
unsigned long val;
- char buf[64];
+ char buf[64]; /* big enough to hold a number */
int ret;
if (cnt >= sizeof(buf))
ftrace_profile_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- char buf[64];
+ char buf[64]; /* big enough to hold a number */
int r;
r = sprintf(buf, "%u\n", ftrace_profile_enabled);
/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
- .name = "functions",
- .stat_start = function_stat_start,
- .stat_next = function_stat_next,
- .stat_cmp = function_stat_cmp,
- .stat_headers = function_stat_headers,
- .stat_show = function_stat_show
+ .name = "functions",
+ .stat_start = function_stat_start,
+ .stat_next = function_stat_next,
+ .stat_cmp = function_stat_cmp,
+ .stat_headers = function_stat_headers,
+ .stat_show = function_stat_show
};
static void ftrace_profile_debugfs(struct dentry *d_tracer)
mutex_lock(&ftrace_lock);
do_for_each_ftrace_rec(pg, rec) {
- if ((rec->ip >= s) && (rec->ip < e) &&
- !(rec->flags & FTRACE_FL_FREE))
+ if ((rec->ip >= s) && (rec->ip < e)) {
+ /*
+ * rec->ip is changed in ftrace_free_rec();
+ * it should not be between s and e if the record was freed.
+ */
+ FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
ftrace_free_rec(rec);
+ }
} while_for_each_ftrace_rec();
mutex_unlock(&ftrace_lock);
}
static struct ftrace_ops trace_probe_ops __read_mostly =
{
- .func = function_trace_probe_call,
+ .func = function_trace_probe_call,
};
static int ftrace_probe_registered;
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
- struct dentry *entry;
- entry = debugfs_create_file("available_filter_functions", 0444,
- d_tracer, NULL, &ftrace_avail_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'available_filter_functions' entry\n");
+ trace_create_file("available_filter_functions", 0444,
+ d_tracer, NULL, &ftrace_avail_fops);
- entry = debugfs_create_file("failures", 0444,
- d_tracer, NULL, &ftrace_failures_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'failures' entry\n");
+ trace_create_file("failures", 0444,
+ d_tracer, NULL, &ftrace_failures_fops);
- entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
- NULL, &ftrace_filter_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'set_ftrace_filter' entry\n");
+ trace_create_file("set_ftrace_filter", 0644, d_tracer,
+ NULL, &ftrace_filter_fops);
- entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
+ trace_create_file("set_ftrace_notrace", 0644, d_tracer,
NULL, &ftrace_notrace_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'set_ftrace_notrace' entry\n");
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
+ trace_create_file("set_graph_function", 0444, d_tracer,
NULL,
&ftrace_graph_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
return 0;
static __init int ftrace_init_debugfs(void)
{
struct dentry *d_tracer;
- struct dentry *entry;
d_tracer = tracing_init_dentry();
if (!d_tracer)
return 0;
ftrace_init_dyn_debugfs(d_tracer);
- entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
- NULL, &ftrace_pid_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'set_ftrace_pid' entry\n");
+ trace_create_file("set_ftrace_pid", 0644, d_tracer,
+ NULL, &ftrace_pid_fops);
ftrace_profile_debugfs(d_tracer);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static atomic_t ftrace_graph_active;
+static int ftrace_graph_active;
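+/* ftrace_graph_active is only modified under ftrace_lock, so it need not be atomic */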
static struct notifier_block ftrace_suspend_notifier;
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
mutex_lock(&ftrace_lock);
/* we currently allow only one tracer registered at a time */
- if (atomic_read(&ftrace_graph_active)) {
+ if (ftrace_graph_active) {
ret = -EBUSY;
goto out;
}
ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
register_pm_notifier(&ftrace_suspend_notifier);
- atomic_inc(&ftrace_graph_active);
+ ftrace_graph_active++;
ret = start_graph_tracing();
if (ret) {
- atomic_dec(&ftrace_graph_active);
+ ftrace_graph_active--;
goto out;
}
{
mutex_lock(&ftrace_lock);
- atomic_dec(&ftrace_graph_active);
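+ /* nothing to do if the graph tracer is not registered */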
+ if (unlikely(!ftrace_graph_active))
+ goto out;
+
+ ftrace_graph_active--;
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
ftrace_shutdown(FTRACE_STOP_FUNC_RET);
unregister_pm_notifier(&ftrace_suspend_notifier);
+ out:
mutex_unlock(&ftrace_lock);
}
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
- if (atomic_read(&ftrace_graph_active)) {
+ if (ftrace_graph_active) {
t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
* sizeof(struct ftrace_ret_stack),
GFP_KERNEL);