tracing: Kill unused and puzzling sample code in ftrace.h
author Shan Wei <davidshan@tencent.com>
Sat, 3 Nov 2012 04:38:33 +0000 (12:38 +0800)
committer Steven Rostedt <rostedt@goodmis.org>
Tue, 13 Nov 2012 20:51:21 +0000 (15:51 -0500)
While doing per-cpu helper optimization work, I found this code puzzling.
1. It is marked as comment text, perhaps intended as a sample function for
   guidance or as a TODO item.
2. But the sample code is stale: struct perf_trace_buf no longer exists.
   Commit ce71b9 removed the struct perf_trace_buf definition:

   Author: Frederic Weisbecker <fweisbec@gmail.com>
   Date:   Sun Nov 22 05:26:55 2009 +0100

   tracing: Use the perf recursion protection from trace event

Is it necessary to keep it here?
Compile tested only.

Link: http://lkml.kernel.org/r/50949FC9.6050202@gmail.com
Signed-off-by: Shan Wei <davidshan@tencent.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
include/trace/ftrace.h

index 698f2a890322b10a51d736af9431b00e595b5e1f..40dc5e8fe3401a6ba1e5c601d17f5cfb18913250 100644
@@ -619,79 +619,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-/*
- * Define the insertion callback to perf events
- *
- * The job is very similar to ftrace_raw_event_<call> except that we don't
- * insert in the ring buffer but in a perf counter.
- *
- * static void ftrace_perf_<call>(proto)
- * {
- *     struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- *     struct ftrace_event_call *event_call = &event_<call>;
- *     extern void perf_tp_event(int, u64, u64, void *, int);
- *     struct ftrace_raw_##call *entry;
- *     struct perf_trace_buf *trace_buf;
- *     u64 __addr = 0, __count = 1;
- *     unsigned long irq_flags;
- *     struct trace_entry *ent;
- *     int __entry_size;
- *     int __data_size;
- *     int __cpu
- *     int pc;
- *
- *     pc = preempt_count();
- *
- *     __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- *     // Below we want to get the aligned size by taking into account
- *     // the u32 field that will later store the buffer size
- *     __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
- *                          sizeof(u64));
- *     __entry_size -= sizeof(u32);
- *
- *     // Protect the non nmi buffer
- *     // This also protects the rcu read side
- *     local_irq_save(irq_flags);
- *     __cpu = smp_processor_id();
- *
- *     if (in_nmi())
- *             trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
- *     else
- *             trace_buf = rcu_dereference_sched(perf_trace_buf);
- *
- *     if (!trace_buf)
- *             goto end;
- *
- *     trace_buf = per_cpu_ptr(trace_buf, __cpu);
- *
- *     // Avoid recursion from perf that could mess up the buffer
- *     if (trace_buf->recursion++)
- *             goto end_recursion;
- *
- *     raw_data = trace_buf->buf;
- *
- *     // Make recursion update visible before entering perf_tp_event
- *     // so that we protect from perf recursions.
- *
- *     barrier();
- *
- *     //zero dead bytes from alignment to avoid stack leak to userspace:
- *     *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *     entry = (struct ftrace_raw_<call> *)raw_data;
- *     ent = &entry->ent;
- *     tracing_generic_entry_update(ent, irq_flags, pc);
- *     ent->type = event_call->id;
- *
- *     <tstruct> <- do some jobs with dynamic arrays
- *
- *     <assign>  <- affect our values
- *
- *     perf_tp_event(event_call->id, __addr, __count, entry,
- *                  __entry_size);  <- submit them to perf counter
- *
- * }
- */
 
 #ifdef CONFIG_PERF_EVENTS
 