kernel/trace/trace_power.c (deliverable/linux.git, as of "ftrace: set up trace event hash infrastructure")
/*
 * ring buffer based C-state tracer
 *
 * Arjan van de Ven <arjan@linux.intel.com>
 * Copyright (C) 2008 Intel Corporation
 *
 * Much is borrowed from trace_boot.c which is
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>

#include "trace.h"
#include "trace_output.h"

static struct trace_array *power_trace;
static int __read_mostly trace_power_enabled;

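/*
 * ->start/->stop callbacks for the ftrace core: they only toggle the
 * global enable flag that the probes below test before touching the
 * ring buffer.
 */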
static void start_power_trace(struct trace_array *tr)
{
	trace_power_enabled = 1;
}

static void stop_power_trace(struct trace_array *tr)
{
	trace_power_enabled = 0;
}

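/*
 * Called when the "power" tracer is selected as the current tracer:
 * remember the trace_array, enable the probes and clear stale data
 * from every possible CPU's ring buffer.
 */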
static int power_trace_init(struct trace_array *tr)
{
	int cpu;
	power_trace = tr;

	trace_power_enabled = 1;

	for_each_cpu_mask(cpu, cpu_possible_map)
		tracing_reset(tr, cpu);
	return 0;
}

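/*
 * Turn a TRACE_POWER entry back into human readable text for the
 * trace output: one line per C-state transition (with residency) or
 * per P-state transition.
 */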
static enum print_line_t power_print_line(struct trace_iterator *iter)
{
	int ret = 0;
	struct trace_entry *entry = iter->ent;
	struct trace_power *field;
	struct power_trace *it;
	struct trace_seq *s = &iter->seq;
	struct timespec stamp;
	struct timespec duration;

	trace_assign_type(field, entry);
	it = &field->state_data;
	stamp = ktime_to_timespec(it->stamp);
	duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));

	if (entry->type == TRACE_POWER) {
		if (it->type == POWER_CSTATE)
			ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n",
					       stamp.tv_sec,
					       stamp.tv_nsec,
					       it->state, iter->cpu,
					       duration.tv_sec,
					       duration.tv_nsec);
		if (it->type == POWER_PSTATE)
			ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n",
					       stamp.tv_sec,
					       stamp.tv_nsec,
					       it->state, iter->cpu);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_HANDLED;
	}
	return TRACE_TYPE_UNHANDLED;
}

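/*
 * Tracer descriptor registered with the ftrace core; ->reset reuses
 * stop_power_trace() since switching away only needs the enable flag
 * cleared.
 */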
static struct tracer power_tracer __read_mostly =
{
	.name		= "power",
	.init		= power_trace_init,
	.start		= start_power_trace,
	.stop		= stop_power_trace,
	.reset		= stop_power_trace,
	.print_line	= power_print_line,
};

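/* Register the tracer at device_initcall() time, after the tracing core is up. */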
static int init_power_trace(void)
{
	return register_tracer(&power_tracer);
}
device_initcall(init_power_trace);

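/*
 * Probe called when a C-state or P-state transition begins (presumably
 * from the idle and cpufreq paths): record the start timestamp in the
 * caller-provided power_trace so trace_power_end() can compute the
 * residency later.
 */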
void trace_power_start(struct power_trace *it, unsigned int type,
			unsigned int level)
{
	if (!trace_power_enabled)
		return;

	memset(it, 0, sizeof(struct power_trace));
	it->state = level;
	it->type = type;
	it->stamp = ktime_get();
}
EXPORT_SYMBOL_GPL(trace_power_start);

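/*
 * Probe called when the transition started by trace_power_start()
 * completes: stamp the end time and commit the whole power_trace
 * sample to the current CPU's ring buffer.
 */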
void trace_power_end(struct power_trace *it)
{
	struct ring_buffer_event *event;
	struct trace_power *entry;
	struct trace_array_cpu *data;
	unsigned long irq_flags;
	struct trace_array *tr = power_trace;

	if (!trace_power_enabled)
		return;

	preempt_disable();
	it->end = ktime_get();
	data = tr->data[smp_processor_id()];

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_POWER;
	entry->state_data = *it;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();

 out:
	preempt_enable();
}
EXPORT_SYMBOL_GPL(trace_power_end);

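/*
 * Like trace_power_start() plus trace_power_end() in a single step,
 * for instantaneous events (end == stamp), e.g. a P-state change with
 * no meaningful duration.
 */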
void trace_power_mark(struct power_trace *it, unsigned int type,
			unsigned int level)
{
	struct ring_buffer_event *event;
	struct trace_power *entry;
	struct trace_array_cpu *data;
	unsigned long irq_flags;
	struct trace_array *tr = power_trace;

	if (!trace_power_enabled)
		return;

	memset(it, 0, sizeof(struct power_trace));
	it->state = level;
	it->type = type;
	it->stamp = ktime_get();
	preempt_disable();
	it->end = it->stamp;
	data = tr->data[smp_processor_id()];

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_POWER;
	entry->state_data = *it;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();

 out:
	preempt_enable();
}
EXPORT_SYMBOL_GPL(trace_power_mark);