Merge commit 'origin/master' into next
[deliverable/linux.git] / kernel / trace / trace_functions_graph.c
index 4bf39fcae97afadc384b104e5d77be0120e375e3..dce71a5b51bce5a953b5000628bbf1c58dc37cff 100644 (file)
@@ -42,6 +42,80 @@ static struct tracer_flags tracer_flags = {
 /* pid on the last trace processed */
 static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 
+/* Add a function return address to the trace stack on thread info. */
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+                        unsigned long func, int *depth)
+{
+       int index;
+
+       if (!current->ret_stack)
+               return -EBUSY;
+
+       /* The return trace stack is full */
+       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+               atomic_inc(&current->trace_overrun);
+               return -EBUSY;
+       }
+
+       index = ++current->curr_ret_stack;
+       barrier();
+       current->ret_stack[index].ret = ret;
+       current->ret_stack[index].func = func;
+       current->ret_stack[index].calltime = time;
+       *depth = index;
+
+       return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info. */
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+       int index;
+
+       index = current->curr_ret_stack;
+
+       if (unlikely(index < 0)) {
+               ftrace_graph_stop();
+               WARN_ON(1);
+       /* Might as well panic, otherwise we have nowhere to go */
+               *ret = (unsigned long)panic;
+               return;
+       }
+
+       *ret = current->ret_stack[index].ret;
+       trace->func = current->ret_stack[index].func;
+       trace->calltime = current->ret_stack[index].calltime;
+       trace->overrun = atomic_read(&current->trace_overrun);
+       trace->depth = index;
+       barrier();
+       current->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring buffer and return the original
+ * return address so the trampoline can jump back to it.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+       struct ftrace_graph_ret trace;
+       unsigned long ret;
+
+       ftrace_pop_return_trace(&trace, &ret);
+       trace.rettime = cpu_clock(raw_smp_processor_id());
+       ftrace_graph_return(&trace);
+
+       if (unlikely(!ret)) {
+               ftrace_graph_stop();
+               WARN_ON(1);
+               /* Might as well panic. What else to do? */
+               ret = (unsigned long)panic;
+       }
+
+       return ret;
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
        int cpu, ret;
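Taken together, the three helpers above are the generic half of the function-graph return hook: ftrace_push_return_trace() saves the real return address and entry timestamp on the per-task shadow stack (current->ret_stack) when the architecture entry code redirects a function's on-stack return address to its trampoline, and ftrace_return_to_handler() later pops that entry, stamps trace.rettime, pushes the event out through ftrace_graph_return(), and hands the saved address back so the trampoline can jump to it. The barrier() calls are compiler barriers that keep the curr_ret_stack update ordered against the reads and writes of the entry itself. Below is a minimal user-space sketch of the same shadow-stack bookkeeping; shadow_push(), shadow_pop(), struct ret_entry and RET_DEPTH are hypothetical stand-ins for the kernel machinery, and the panic fallbacks are dropped for brevity:

#include <stdio.h>

#define RET_DEPTH 50	/* hypothetical stand-in for FTRACE_RETFUNC_DEPTH */

struct ret_entry {
	unsigned long ret;		/* saved return address */
	unsigned long func;		/* traced function address */
	unsigned long long calltime;	/* entry timestamp */
};

static struct ret_entry ret_stack[RET_DEPTH];
static int curr_ret = -1;		/* -1 means the stack is empty */
static unsigned long overruns;		/* dropped entries, like trace_overrun */

/* Like ftrace_push_return_trace(): refuse (the kernel returns -EBUSY)
 * when the shadow stack is full, otherwise record ret/func/time. */
static int shadow_push(unsigned long ret, unsigned long func,
		       unsigned long long time)
{
	if (curr_ret == RET_DEPTH - 1) {
		overruns++;
		return -1;
	}
	curr_ret++;
	ret_stack[curr_ret].ret = ret;
	ret_stack[curr_ret].func = func;
	ret_stack[curr_ret].calltime = time;
	return 0;
}

/* Like ftrace_pop_return_trace(): hand back the saved return address
 * plus the metadata the ring-buffer event is built from. */
static unsigned long shadow_pop(unsigned long *func,
				unsigned long long *calltime, int *depth)
{
	unsigned long ret = ret_stack[curr_ret].ret;

	*func = ret_stack[curr_ret].func;
	*calltime = ret_stack[curr_ret].calltime;
	*depth = curr_ret--;		/* depth is the index just popped */
	return ret;
}

int main(void)
{
	unsigned long func, ret;
	unsigned long long calltime;
	int depth;

	shadow_push(0x400abc, 0x400def, 1000ULL);
	ret = shadow_pop(&func, &calltime, &depth);
	printf("ret=%#lx func=%#lx calltime=%llu depth=%d overruns=%lu\n",
	       ret, func, calltime, depth, overruns);
	return 0;
}

The push side mirrors the kernel's overrun handling: once the fixed-depth stack fills, further entries are dropped and counted rather than overwriting live return addresses, which is why trace->overrun is reported with every event.
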
@@ -79,7 +153,7 @@ print_graph_cpu(struct trace_seq *s, int cpu)
        int i;
        int ret;
        int log10_this = log10_cpu(cpu);
-       int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
+       int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
 
 
        /*
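The one-line change in the second hunk is part of the cpumask API rework: the removed cpus_weight_nr()/cpu_online_map pair is replaced by cpumask_weight(cpu_online_mask), and both evaluate to the number of online CPUs. print_graph_cpu() only needs that number to pad every CPU id out to the same column width. Below is a rough user-space illustration of the padding idea, with the hypothetical digits() standing in for the kernel's log10_cpu() and sysconf() for the online-CPU count:

#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for log10_cpu(): decimal digits needed to print v. */
static int digits(long v)
{
	int n = 1;

	while (v >= 10) {
		v /= 10;
		n++;
	}
	return n;
}

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);	/* ~ cpumask_weight(cpu_online_mask) */
	int width = digits(ncpus);
	long cpu;

	/* Right-align each id to the widest value, as print_graph_cpu()
	 * does with explicit space padding, so the CPU column lines up. */
	for (cpu = 0; cpu < ncpus; cpu++)
		printf("%*ld)\n", width, cpu);
	return 0;
}

With, say, 12 CPUs online the width is 2, so CPU 3 prints as " 3)" and CPU 11 as "11)".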