/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */
8 #include <linux/module.h>
9 #include <linux/kprobes.h>
12 DEFINE_PER_CPU(struct pt_regs
, perf_trace_regs
);
13 EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs
);
15 EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs
);
17 static char *perf_trace_buf
;
18 static char *perf_trace_buf_nmi
;
21 * Force it to be aligned to unsigned long to avoid misaligned accesses
24 typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE
/ sizeof(unsigned long)])
27 /* Count the events in use (per event id, not per instance) */
28 static int total_ref_count
;
/*
 * perf_trace_event_enable - take a perf reference on one trace event.
 *
 * First caller for a given event allocates the shared per-cpu trace
 * buffers (normal and NMI variants) and registers the event's perf probe;
 * subsequent callers only bump the refcount.
 *
 * NOTE(review): this extract is fragmented — original file line numbers
 * are embedded in the text and interior lines (locals, returns, error
 * labels, braces) are elided. Comments below describe only the visible
 * statements; reconcile against the full file before relying on them.
 */
30 static int perf_trace_event_enable(struct ftrace_event_call
*event
)
/* Already enabled by someone else: just account the extra reference. */
35 if (event
->perf_refcount
++ > 0)
/* First user overall: allocate the shared per-cpu buffers. */
38 if (!total_ref_count
) {
39 buf
= (char *)alloc_percpu(perf_trace_t
);
/* Publish the buffer for RCU readers (see perf_trace_buf_prepare). */
43 rcu_assign_pointer(perf_trace_buf
, buf
);
/* Separate buffer for NMI context so it cannot clobber the normal one. */
45 buf
= (char *)alloc_percpu(perf_trace_t
);
49 rcu_assign_pointer(perf_trace_buf_nmi
, buf
);
/* Prefer the event class's own registration hook when it provides one. */
52 if (event
->class->reg
)
53 ret
= event
->class->reg(event
, TRACE_REG_PERF_REGISTER
);
/* Otherwise fall back to registering the generic tracepoint probe. */
55 ret
= tracepoint_probe_register(event
->name
,
56 event
->class->perf_probe
,
/*
 * Failure path (elided above): if we were the only user, tear the
 * buffers back down and clear the published pointers.
 */
64 if (!total_ref_count
) {
65 free_percpu(perf_trace_buf_nmi
);
66 free_percpu(perf_trace_buf
);
67 perf_trace_buf_nmi
= NULL
;
68 perf_trace_buf
= NULL
;
/* Undo the refcount bump taken at the top of the function. */
71 event
->perf_refcount
--;
/*
 * perf_trace_enable - look up a trace event by perf event id and enable it.
 *
 * Walks the global ftrace_events list under event_mutex; on a match it
 * pins the owning module and delegates to perf_trace_event_enable().
 *
 * NOTE(review): fragmented extract — the break/return and the ret
 * declaration are elided; verify against the full file.
 */
76 int perf_trace_enable(int event_id
)
78 struct ftrace_event_call
*event
;
/* event_mutex serializes against event registration/removal. */
81 mutex_lock(&event_mutex
);
82 list_for_each_entry(event
, &ftrace_events
, list
) {
/*
 * Match on the event id, require a perf probe, and take a module
 * reference so the event's module cannot unload while in use.
 */
83 if (event
->event
.type
== event_id
&&
84 event
->class && event
->class->perf_probe
&&
85 try_module_get(event
->mod
)) {
86 ret
= perf_trace_event_enable(event
);
90 mutex_unlock(&event_mutex
);
/*
 * perf_trace_event_disable - drop a perf reference on one trace event.
 *
 * Mirror of perf_trace_event_enable(): unregisters the probe when the
 * event's refcount hits zero, and frees the shared per-cpu buffers when
 * the global total_ref_count hits zero.
 *
 * NOTE(review): fragmented extract — locals (buf, nmi_buf), the early
 * return, and the RCU grace-period wait between unpublish and free are
 * elided; verify against the full file.
 */
95 static void perf_trace_event_disable(struct ftrace_event_call
*event
)
/* Other users remain: just drop our reference. */
99 if (--event
->perf_refcount
> 0)
/* Prefer the class's own unregistration hook when it provides one. */
102 if (event
->class->reg
)
103 event
->class->reg(event
, TRACE_REG_PERF_UNREGISTER
);
/* Otherwise unregister the generic tracepoint probe. */
105 tracepoint_probe_unregister(event
->name
, event
->class->perf_probe
, event
);
/* Last user overall: unpublish the shared buffers before freeing them. */
107 if (!--total_ref_count
) {
108 buf
= perf_trace_buf
;
109 rcu_assign_pointer(perf_trace_buf
, NULL
);
111 nmi_buf
= perf_trace_buf_nmi
;
112 rcu_assign_pointer(perf_trace_buf_nmi
, NULL
);
/*
 * Ensure all events in profiling have finished before
 * releasing the buffers.
 */
121 free_percpu(nmi_buf
);
/*
 * perf_trace_disable - look up a trace event by perf event id and disable it.
 *
 * Counterpart of perf_trace_enable(): finds the matching event under
 * event_mutex, disables it, and drops the module reference taken at
 * enable time.
 *
 * NOTE(review): fragmented extract — the break out of the loop is
 * elided; verify against the full file.
 */
125 void perf_trace_disable(int event_id
)
127 struct ftrace_event_call
*event
;
/* event_mutex serializes against event registration/removal. */
129 mutex_lock(&event_mutex
);
130 list_for_each_entry(event
, &ftrace_events
, list
) {
131 if (event
->event
.type
== event_id
) {
132 perf_trace_event_disable(event
);
/* Release the module pin taken by perf_trace_enable(). */
133 module_put(event
->mod
);
137 mutex_unlock(&event_mutex
);
/*
 * perf_trace_buf_prepare - reserve per-cpu scratch space for one perf
 * trace record and initialize its common trace_entry header.
 *
 * Disables local IRQs (flags returned via @irq_flags — the caller must
 * restore them via the matching buf-submit path), enters the swevent
 * recursion context via @rctxp, picks the normal or NMI per-cpu buffer,
 * and returns a pointer to the record. @size is the record size in
 * bytes, @type the trace event type for the header.
 *
 * NOTE(review): fragmented extract — locals (pc, cpu), the in_nmi()
 * selection between the two buffers, the NULL-buffer bailout, and the
 * return statement are elided; verify against the full file.
 */
140 __kprobes
void *perf_trace_buf_prepare(int size
, unsigned short type
,
141 int *rctxp
, unsigned long *irq_flags
)
143 struct trace_entry
*entry
;
144 char *trace_buf
, *raw_data
;
/* Buffer is carved into unsigned-long-sized slots; size must divide evenly. */
147 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE
% sizeof(unsigned long));
148 pc
= preempt_count();
/* Protect the per cpu buffer, begin the rcu read side */
152 local_irq_save(*irq_flags
);
/* Guard against recursive swevent processing on this CPU. */
154 *rctxp
= perf_swevent_get_recursion_context();
158 cpu
= smp_processor_id();
/* NMI context gets its dedicated buffer (selection condition elided). */
161 trace_buf
= rcu_dereference_sched(perf_trace_buf_nmi
);
163 trace_buf
= rcu_dereference_sched(perf_trace_buf
);
168 raw_data
= per_cpu_ptr(trace_buf
, cpu
);
/* zero the dead bytes from align to not leak stack to user */
171 memset(&raw_data
[size
- sizeof(u64
)], 0, sizeof(u64
));
/* Fill the common header (pid, flags, preempt count) at the record start. */
173 entry
= (struct trace_entry
*)raw_data
;
174 tracing_generic_entry_update(entry
, *irq_flags
, pc
);
/* Error path (elided): undo recursion context and re-enable IRQs. */
179 perf_swevent_put_recursion_context(*rctxp
);
181 local_irq_restore(*irq_flags
);
184 EXPORT_SYMBOL_GPL(perf_trace_buf_prepare
);