perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot
[deliverable/linux.git] / kernel/trace/trace_event_profile.c
/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;

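/*
 * Set up profiling for one event: allocate the shared per-cpu buffers
 * (one for normal context, one for NMI context) on first use, then call
 * the event's profile_enable() callback. Reference counted per event.
 */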
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	if (event->profile_count++ > 0)
		return 0;

	if (!total_profile_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->profile_enable(event);
	if (!ret) {
		total_profile_count++;
		return 0;
	}

fail_buf_nmi:
	if (!total_profile_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->profile_count--;

	return ret;
}

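/*
 * Enable profiling for the event matching @event_id. Takes a module
 * reference so the event cannot go away while it is being profiled.
 */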
int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
			ret = ftrace_profile_enable_event(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

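/*
 * Tear down profiling for one event: drop its reference count and, when
 * the last profiled event goes away, release the shared per-cpu buffers
 * after waiting for all in-flight users.
 */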
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (--event->profile_count > 0)
		return;

	event->profile_disable(event);

	if (!--total_profile_count) {
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure all in-flight profiling events have finished
		 * before releasing the buffers.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}

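/*
 * Disable profiling for the event matching @event_id and drop the module
 * reference taken by ftrace_profile_enable().
 */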
void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}

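/*
 * Reserve space in the per-cpu profiling buffer for one event record of
 * @size bytes and initialize its trace_entry header with @type. Disables
 * interrupts and enters a perf recursion context on success; the caller
 * is expected to fill in the record and hand it to the submit counterpart,
 * which reports the event and undoes both.
 */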
__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
					int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the per-cpu buffer: begin the rcu read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from alignment to avoid leaking stack to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
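
/*
 * Example caller (a minimal sketch, not code from this file: it assumes
 * the ftrace_perf_buf_submit() counterpart from include/linux/ftrace_event.h,
 * and "event_id" and the record layout here are hypothetical):
 *
 *	struct {
 *		struct trace_entry	ent;
 *		u64			value;
 *	} *rec;
 *	unsigned long irq_flags;
 *	int rctx;
 *	int size = ALIGN(sizeof(*rec), sizeof(u64));
 *
 *	rec = ftrace_perf_buf_prepare(size, event_id, &rctx, &irq_flags);
 *	if (!rec)
 *		return;
 *	rec->value = 42;
 *	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, irq_flags);
 */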