From: Ingo Molnar
Date: Thu, 19 Feb 2009 11:13:33 +0000 (+0100)
Subject: Merge branch 'mainline/function-graph' of git://git.kernel.org/pub/scm/linux/kernel...
X-Git-Url: http://drtracing.org/?a=commitdiff_plain;h=4cd0332db7e8f57cc082bab11d82c064a9721737;p=deliverable%2Flinux.git

Merge branch 'mainline/function-graph' of git://git./linux/kernel/git/rostedt/linux-2.6-trace into tracing/function-graph-tracer
---

4cd0332db7e8f57cc082bab11d82c064a9721737
diff --cc arch/x86/kernel/ftrace.c
index 2f9c0c8cb4c7,76f7141e0f91..c2e057d9f88c
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@@ -367,81 -368,27 +367,8 @@@ int ftrace_disable_ftrace_graph_caller(
  	return ftrace_mod_jmp(ip, old_offset, new_offset);
  }
  
 -#else /* CONFIG_DYNAMIC_FTRACE */
 -
 -/*
 - * These functions are picked from those used on
 - * this page for dynamic ftrace. They have been
 - * simplified to ignore all traces in NMI context.
 - */
 -static atomic_t in_nmi;
 -
 -void ftrace_nmi_enter(void)
 -{
 -	atomic_inc(&in_nmi);
 -}
 -
 -void ftrace_nmi_exit(void)
 -{
 -	atomic_dec(&in_nmi);
 -}
 -
  #endif /* !CONFIG_DYNAMIC_FTRACE */
  
- /* Add a function return address to the trace stack on thread info.*/
- static int push_return_trace(unsigned long ret, unsigned long long time,
- 				unsigned long func, int *depth)
- {
- 	int index;
- 
- 	if (!current->ret_stack)
- 		return -EBUSY;
- 
- 	/* The return trace stack is full */
- 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
- 		atomic_inc(&current->trace_overrun);
- 		return -EBUSY;
- 	}
- 
- 	index = ++current->curr_ret_stack;
- 	barrier();
- 	current->ret_stack[index].ret = ret;
- 	current->ret_stack[index].func = func;
- 	current->ret_stack[index].calltime = time;
- 	*depth = index;
- 
- 	return 0;
- }
- 
- /* Retrieve a function return address to the trace stack on thread info.*/
- static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
- {
- 	int index;
- 
- 	index = current->curr_ret_stack;
- 
- 	if (unlikely(index < 0)) {
- 		ftrace_graph_stop();
- 		WARN_ON(1);
- 		/* Might as well panic, otherwise we have no where to go */
- 		*ret = (unsigned long)panic;
- 		return;
- 	}
- 
- 	*ret = current->ret_stack[index].ret;
- 	trace->func = current->ret_stack[index].func;
- 	trace->calltime = current->ret_stack[index].calltime;
- 	trace->overrun = atomic_read(&current->trace_overrun);
- 	trace->depth = index;
- 	barrier();
- 	current->curr_ret_stack--;
- 
- }
- 
- /*
-  * Send the trace to the ring-buffer.
-  * @return the original return address.
-  */
- unsigned long ftrace_return_to_handler(void)
- {
- 	struct ftrace_graph_ret trace;
- 	unsigned long ret;
- 
- 	pop_return_trace(&trace, &ret);
- 	trace.rettime = cpu_clock(raw_smp_processor_id());
- 	ftrace_graph_return(&trace);
- 
- 	if (unlikely(!ret)) {
- 		ftrace_graph_stop();
- 		WARN_ON(1);
- 		/* Might as well panic. What else to do? */
- 		ret = (unsigned long)panic;
- 	}
- 
- 	return ret;
- }
- 
  /*
   * Hook the return address and push it in the stack of return addrs
   * in current thread info.
@@@ -492,9 -439,16 +419,9 @@@ void prepare_ftrace_return(unsigned lon
  		return;
  	}
  
 -	if (unlikely(!__kernel_text_address(old))) {
 -		ftrace_graph_stop();
 -		*parent = old;
 -		WARN_ON(1);
 -		return;
 -	}
 -
  	calltime = cpu_clock(raw_smp_processor_id());
  
- 	if (push_return_trace(old, calltime,
+ 	if (ftrace_push_return_trace(old, calltime,
  				self_addr, &trace.depth) == -EBUSY) {
  		*parent = old;
  		return;
diff --cc kernel/trace/trace_functions_graph.c
index 0ff5cb661900,dce71a5b51bc..6c7738e4f98b
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@@ -48,11 -40,91 +48,86 @@@ static struct tracer_flags tracer_flag
  };
  
  /* pid on the last trace processed */
 -static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
+ 
+ /* Add a function return address to the trace stack on thread info.*/
+ int
+ ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+ 			unsigned long func, int *depth)
+ {
+ 	int index;
+ 
+ 	if (!current->ret_stack)
+ 		return -EBUSY;
+ 
+ 	/* The return trace stack is full */
+ 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+ 		atomic_inc(&current->trace_overrun);
+ 		return -EBUSY;
+ 	}
+ 
+ 	index = ++current->curr_ret_stack;
+ 	barrier();
+ 	current->ret_stack[index].ret = ret;
+ 	current->ret_stack[index].func = func;
+ 	current->ret_stack[index].calltime = time;
+ 	*depth = index;
+ 
+ 	return 0;
+ }
+ 
+ /* Retrieve a function return address to the trace stack on thread info.*/
+ void
+ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+ {
+ 	int index;
+ 
+ 	index = current->curr_ret_stack;
+ 
+ 	if (unlikely(index < 0)) {
+ 		ftrace_graph_stop();
+ 		WARN_ON(1);
+ 		/* Might as well panic, otherwise we have no where to go */
+ 		*ret = (unsigned long)panic;
+ 		return;
+ 	}
+ 
+ 	*ret = current->ret_stack[index].ret;
+ 	trace->func = current->ret_stack[index].func;
+ 	trace->calltime = current->ret_stack[index].calltime;
+ 	trace->overrun = atomic_read(&current->trace_overrun);
+ 	trace->depth = index;
+ 	barrier();
+ 	current->curr_ret_stack--;
+ 
+ }
+ 
+ /*
+  * Send the trace to the ring-buffer.
+  * @return the original return address.
+  */
+ unsigned long ftrace_return_to_handler(void)
+ {
+ 	struct ftrace_graph_ret trace;
+ 	unsigned long ret;
+ 
+ 	ftrace_pop_return_trace(&trace, &ret);
+ 	trace.rettime = cpu_clock(raw_smp_processor_id());
+ 	ftrace_graph_return(&trace);
+ 
+ 	if (unlikely(!ret)) {
+ 		ftrace_graph_stop();
+ 		WARN_ON(1);
+ 		/* Might as well panic. What else to do? */
+ 		ret = (unsigned long)panic;
+ 	}
+ 
+ 	return ret;
+ }
+ 
  static int graph_trace_init(struct trace_array *tr)
  {
 -	int cpu, ret;
 -
 -	for_each_online_cpu(cpu)
 -		tracing_reset(tr, cpu);
 -
 -	ret = register_ftrace_graph(&trace_graph_return,
 +	int ret = register_ftrace_graph(&trace_graph_return,
  					&trace_graph_entry);
  	if (ret)
  		return ret;
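
For readers following the code movement above: ftrace_push_return_trace() and ftrace_pop_return_trace() maintain a small per-task stack of saved return addresses, so that ftrace_return_to_handler() can report a function's exit and then jump back to the real caller. Below is a minimal, userspace-only sketch of that stack discipline under simplified assumptions: a single fixed global array instead of per-task state on task_struct, a plain counter instead of atomic_t, no barrier(), and a made-up MAX_DEPTH standing in for the kernel's FTRACE_RETFUNC_DEPTH. It is an illustration of the mechanism, not the kernel code.

	#include <stdio.h>

	#define MAX_DEPTH 50			/* stand-in for FTRACE_RETFUNC_DEPTH */

	struct ret_stack_entry {
		unsigned long ret;		/* saved return address */
		unsigned long func;		/* traced function's address */
		unsigned long long calltime;	/* timestamp taken at entry */
	};

	static struct ret_stack_entry ret_stack[MAX_DEPTH];
	static int curr_ret_stack = -1;		/* -1 == empty, as in the kernel */
	static unsigned long trace_overrun;	/* frames dropped because full */

	/* Push one frame; returns -1 (the kernel returns -EBUSY) when full. */
	static int push_return_trace(unsigned long ret, unsigned long long time,
				     unsigned long func, int *depth)
	{
		if (curr_ret_stack == MAX_DEPTH - 1) {
			trace_overrun++;
			return -1;
		}
		curr_ret_stack++;
		ret_stack[curr_ret_stack].ret = ret;
		ret_stack[curr_ret_stack].func = func;
		ret_stack[curr_ret_stack].calltime = time;
		*depth = curr_ret_stack;
		return 0;
	}

	/* Pop the top frame and hand back the original return address. */
	static int pop_return_trace(unsigned long *ret)
	{
		if (curr_ret_stack < 0)
			return -1;	/* the kernel calls ftrace_graph_stop() here */
		*ret = ret_stack[curr_ret_stack].ret;
		curr_ret_stack--;
		return 0;
	}

	int main(void)
	{
		unsigned long ret;
		int depth;

		/* Two nested "calls": addresses here are arbitrary examples. */
		push_return_trace(0x1000, 1ULL, 0x2000, &depth);
		push_return_trace(0x1004, 2ULL, 0x3000, &depth);

		/* Unwind in LIFO order, as function returns do. */
		while (pop_return_trace(&ret) == 0)
			printf("return to %#lx\n", ret);
		return 0;
	}

The -1 sentinel for an empty stack mirrors curr_ret_stack in task_struct, and the overrun counter corresponds to the trace_overrun value that the real pop path copies into trace->overrun before handing the record to ftrace_graph_return().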