/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_event_types.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 * For those macros defined with TRACE_FORMAT:
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name = "<call>",
 *	.regfunc = ftrace_reg_event_<call>,
 *	.unregfunc = ftrace_unreg_event_<call>,
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ftrace_event_call *call = &event_<call>;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				sizeof(struct ftrace_raw_<call>),
 *				irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>; <-- Here the fields are assigned by the __field and
 *		      __array macros.
 *
 *	if (call->preds && !filter_match_preds(call, entry))
 *		ring_buffer_event_discard(event);
 *
 *	trace_nowake_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_raw_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace = ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name = "<call>",
 *	.system = "<system>",
 *	.raw_init = ftrace_raw_init_event_<call>,
 *	.regfunc = ftrace_raw_reg_event_<call>,
 *	.unregfunc = ftrace_raw_unreg_event_<call>,
 *	.show_format = ftrace_format_<call>,
 *	.define_fields = ftrace_define_fields_<call>,
 * }
 *
 */
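
/*
 * Purely illustrative (the event name and fields below are made up, not
 * taken from this file): a TRACE_EVENT description that these macros
 * would expand looks roughly like
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int bar),
 *		TP_ARGS(bar),
 *		TP_STRUCT__entry(
 *			__field(int, bar)
 *		),
 *		TP_fast_assign(
 *			__entry->bar = bar;
 *		),
 *		TP_printk("bar=%d", __entry->bar));
 *
 * Stage 3 then generates ftrace_raw_event_foo_bar(), the register and
 * unregister helpers, and an event_foo_bar entry in the _ftrace_events
 * section, exactly as sketched above.
 */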

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args
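
/*
 * TP_FMT tacks the newline onto the format string; the GNU ", ##args"
 * extension drops the trailing comma again when an event is declared
 * without arguments.
 */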

#ifdef CONFIG_EVENT_PROFILE
#define _TRACE_PROFILE(call, proto, args) \
static void ftrace_profile_##call(proto) \
{ \
	extern void perf_tpcounter_event(int); \
	perf_tpcounter_event(event_##call.id); \
} \
 \
static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \
{ \
	int ret = 0; \
 \
	if (!atomic_inc_return(&call->profile_count)) \
		ret = register_trace_##call(ftrace_profile_##call); \
 \
	return ret; \
} \
 \
static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \
{ \
	if (atomic_add_negative(-1, &call->profile_count)) \
		unregister_trace_##call(ftrace_profile_##call); \
}

#define _TRACE_PROFILE_INIT(call) \
	.profile_count = ATOMIC_INIT(-1), \
	.profile_enable = ftrace_profile_enable_##call, \
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)
#endif
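
/*
 * The profile counter starts at -1 (ATOMIC_INIT(-1) above), so the first
 * enable makes atomic_inc_return() return 0 and registers the probe
 * exactly once; further enables merely bump the count.  On disable,
 * atomic_add_negative() is true only when the count falls back to -1,
 * i.e. when the last user is gone, and only then is the probe removed.
 */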

#define _TRACE_FORMAT(call, proto, args, fmt) \
static void ftrace_event_##call(proto) \
{ \
	event_trace_printk(_RET_IP_, #call ": " fmt); \
} \
 \
static int ftrace_reg_event_##call(void) \
{ \
	int ret; \
 \
	ret = register_trace_##call(ftrace_event_##call); \
	if (ret) \
		pr_info("event trace: Could not activate trace point " \
			"probe to " #call "\n"); \
	return ret; \
} \
 \
static void ftrace_unreg_event_##call(void) \
{ \
	unregister_trace_##call(ftrace_event_##call); \
} \
 \
static struct ftrace_event_call event_##call; \
 \
static int ftrace_init_event_##call(void) \
{ \
	int id; \
 \
	id = register_ftrace_event(NULL); \
	if (!id) \
		return -ENODEV; \
	event_##call.id = id; \
	return 0; \
}
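
/*
 * Note that ftrace_init_event_##call passes NULL to
 * register_ftrace_event(): TRACE_FORMAT events need no raw output
 * handler, but a unique event id still has to be reserved for them.
 */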

#undef TRACE_FORMAT
#define TRACE_FORMAT(call, proto, args, fmt) \
_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
	.name = #call, \
	.system = __stringify(TRACE_SYSTEM), \
	.raw_init = ftrace_init_event_##call, \
	.regfunc = ftrace_reg_event_##call, \
	.unregfunc = ftrace_unreg_event_##call, \
	_TRACE_PROFILE_INIT(call) \
}
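
/*
 * A hypothetical use (the event name and arguments are illustrative, not
 * from this file) would look like
 *
 *	TRACE_FORMAT(my_event,
 *		TP_PROTO(int value),
 *		TP_ARGS(value),
 *		TP_FMT("value=%d", value));
 *
 * which records through event_trace_printk() instead of writing a raw
 * binary entry into the ring buffer.
 */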

#undef __entry
#define __entry entry
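
/*
 * The <assign> block of a TRACE_EVENT references its fields through
 * __entry; here it is simply mapped to the local 'entry' pointer, which
 * points straight into the reserved ring buffer event.
 */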

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
 \
static struct ftrace_event_call event_##call; \
 \
static void ftrace_raw_event_##call(proto) \
{ \
	struct ftrace_event_call *call = &event_##call; \
	struct ring_buffer_event *event; \
	struct ftrace_raw_##call *entry; \
	unsigned long irq_flags; \
	int pc; \
 \
	local_save_flags(irq_flags); \
	pc = preempt_count(); \
 \
	event = trace_current_buffer_lock_reserve(event_##call.id, \
				sizeof(struct ftrace_raw_##call), \
				irq_flags, pc); \
	if (!event) \
		return; \
	entry = ring_buffer_event_data(event); \
 \
	assign; \
 \
	if (call->preds && !filter_match_preds(call, entry)) \
		ring_buffer_event_discard(event); \
 \
	trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
} \
 \
static int ftrace_raw_reg_event_##call(void) \
{ \
	int ret; \
 \
	ret = register_trace_##call(ftrace_raw_event_##call); \
	if (ret) \
		pr_info("event trace: Could not activate trace point " \
			"probe to " #call "\n"); \
	return ret; \
} \
 \
static void ftrace_raw_unreg_event_##call(void) \
{ \
	unregister_trace_##call(ftrace_raw_event_##call); \
} \
 \
static struct trace_event ftrace_event_type_##call = { \
	.trace = ftrace_raw_output_##call, \
}; \
 \
static int ftrace_raw_init_event_##call(void) \
{ \
	int id; \
 \
	id = register_ftrace_event(&ftrace_event_type_##call); \
	if (!id) \
		return -ENODEV; \
	event_##call.id = id; \
	INIT_LIST_HEAD(&event_##call.fields); \
	return 0; \
} \
 \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
	.name = #call, \
	.system = __stringify(TRACE_SYSTEM), \
	.raw_init = ftrace_raw_init_event_##call, \
	.regfunc = ftrace_raw_reg_event_##call, \
	.unregfunc = ftrace_raw_unreg_event_##call, \
	.show_format = ftrace_format_##call, \
	.define_fields = ftrace_define_fields_##call, \
	_TRACE_PROFILE_INIT(call) \
}
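
/*
 * Two details of ftrace_raw_event_##call are worth noting.  The entry is
 * assigned in place in ring buffer memory, so filtering can only happen
 * after the assignment: an event that fails filter_match_preds() is
 * discarded again before the commit.  And the commit itself goes through
 * trace_nowake_buffer_unlock_commit(), which skips waking up trace
 * readers, since a wakeup from here could recurse for events that fire
 * from inside the scheduler itself.
 */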

#include <trace/trace_event_types.h>
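
/*
 * Re-including <trace/trace_event_types.h> here is what drives the whole
 * multi-stage scheme: each stage redefines TRACE_EVENT and friends and
 * pulls in the same event descriptions again, so a single description
 * expands into the structures and callbacks of every stage.
 */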

#undef _TRACE_PROFILE
#undef _TRACE_PROFILE_INIT