Commit | Line | Data |
---|---|---|
36994e58 FW |
1 | /* |
2 | * Memory allocator tracing | |
3 | * | |
4 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | |
5 | * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi> | |
6 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | |
7 | */ | |
8 | ||
9 | #include <linux/dcache.h> | |
10 | #include <linux/debugfs.h> | |
11 | #include <linux/fs.h> | |
12 | #include <linux/seq_file.h> | |
ca2b84cb | 13 | #include <linux/tracepoint.h> |
36994e58 FW |
14 | #include <trace/kmemtrace.h> |
15 | ||
16 | #include "trace.h" | |
17 | #include "trace_output.h" | |
18 | ||
/* Select an alternative, minimalistic output than the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1

/* Tracer options exposed through the tracefs "trace_options" interface. */
static struct tracer_opt kmem_opts[] = {
	/* Default disable the minimalistic output */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};

/* Current option state; consulted by the header and print callbacks below. */
static struct tracer_flags kmem_tracer_flags = {
	.val = 0,
	.opts = kmem_opts
};

/* Trace array handed to us by the core at init time; events land here. */
static struct trace_array *kmemtrace_array;
34 | ||
ca2b84cb EGM |
35 | /* Trace allocations */ |
36 | static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id, | |
37 | unsigned long call_site, | |
38 | const void *ptr, | |
39 | size_t bytes_req, | |
40 | size_t bytes_alloc, | |
41 | gfp_t gfp_flags, | |
42 | int node) | |
43 | { | |
44 | struct ring_buffer_event *event; | |
45 | struct kmemtrace_alloc_entry *entry; | |
46 | struct trace_array *tr = kmemtrace_array; | |
47 | ||
48 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); | |
49 | if (!event) | |
50 | return; | |
51 | entry = ring_buffer_event_data(event); | |
52 | tracing_generic_entry_update(&entry->ent, 0, 0); | |
53 | ||
54 | entry->ent.type = TRACE_KMEM_ALLOC; | |
55 | entry->call_site = call_site; | |
56 | entry->ptr = ptr; | |
57 | entry->bytes_req = bytes_req; | |
58 | entry->bytes_alloc = bytes_alloc; | |
59 | entry->gfp_flags = gfp_flags; | |
60 | entry->node = node; | |
61 | ||
62 | ring_buffer_unlock_commit(tr->buffer, event); | |
63 | ||
64 | trace_wake_up(); | |
65 | } | |
66 | ||
67 | static inline void kmemtrace_free(enum kmemtrace_type_id type_id, | |
68 | unsigned long call_site, | |
69 | const void *ptr) | |
70 | { | |
71 | struct ring_buffer_event *event; | |
72 | struct kmemtrace_free_entry *entry; | |
73 | struct trace_array *tr = kmemtrace_array; | |
74 | ||
75 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); | |
76 | if (!event) | |
77 | return; | |
78 | entry = ring_buffer_event_data(event); | |
79 | tracing_generic_entry_update(&entry->ent, 0, 0); | |
80 | ||
81 | entry->ent.type = TRACE_KMEM_FREE; | |
82 | entry->type_id = type_id; | |
83 | entry->call_site = call_site; | |
84 | entry->ptr = ptr; | |
85 | ||
86 | ring_buffer_unlock_commit(tr->buffer, event); | |
87 | ||
88 | trace_wake_up(); | |
89 | } | |
90 | ||
91 | static void kmemtrace_kmalloc(unsigned long call_site, | |
92 | const void *ptr, | |
93 | size_t bytes_req, | |
94 | size_t bytes_alloc, | |
95 | gfp_t gfp_flags) | |
96 | { | |
97 | kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr, | |
98 | bytes_req, bytes_alloc, gfp_flags, -1); | |
99 | } | |
100 | ||
101 | static void kmemtrace_kmem_cache_alloc(unsigned long call_site, | |
102 | const void *ptr, | |
103 | size_t bytes_req, | |
104 | size_t bytes_alloc, | |
105 | gfp_t gfp_flags) | |
106 | { | |
107 | kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr, | |
108 | bytes_req, bytes_alloc, gfp_flags, -1); | |
109 | } | |
110 | ||
111 | static void kmemtrace_kmalloc_node(unsigned long call_site, | |
112 | const void *ptr, | |
113 | size_t bytes_req, | |
114 | size_t bytes_alloc, | |
115 | gfp_t gfp_flags, | |
116 | int node) | |
117 | { | |
118 | kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr, | |
119 | bytes_req, bytes_alloc, gfp_flags, node); | |
120 | } | |
121 | ||
122 | static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site, | |
123 | const void *ptr, | |
124 | size_t bytes_req, | |
125 | size_t bytes_alloc, | |
126 | gfp_t gfp_flags, | |
127 | int node) | |
128 | { | |
129 | kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr, | |
130 | bytes_req, bytes_alloc, gfp_flags, node); | |
131 | } | |
132 | ||
133 | static void kmemtrace_kfree(unsigned long call_site, const void *ptr) | |
134 | { | |
135 | kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr); | |
136 | } | |
137 | ||
138 | static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr) | |
139 | { | |
140 | kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr); | |
141 | } | |
142 | ||
143 | static int kmemtrace_start_probes(void) | |
144 | { | |
145 | int err; | |
146 | ||
147 | err = register_trace_kmalloc(kmemtrace_kmalloc); | |
148 | if (err) | |
149 | return err; | |
150 | err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc); | |
151 | if (err) | |
152 | return err; | |
153 | err = register_trace_kmalloc_node(kmemtrace_kmalloc_node); | |
154 | if (err) | |
155 | return err; | |
156 | err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node); | |
157 | if (err) | |
158 | return err; | |
159 | err = register_trace_kfree(kmemtrace_kfree); | |
160 | if (err) | |
161 | return err; | |
162 | err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free); | |
163 | ||
164 | return err; | |
165 | } | |
166 | ||
/*
 * Detach every kmemtrace probe from its tracepoint.  Mirrors the set
 * registered by kmemtrace_start_probes(); called when the tracer is
 * reset/disabled.
 */
static void kmemtrace_stop_probes(void)
{
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	unregister_trace_kfree(kmemtrace_kfree);
	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}
176 | ||
36994e58 FW |
177 | static int kmem_trace_init(struct trace_array *tr) |
178 | { | |
179 | int cpu; | |
180 | kmemtrace_array = tr; | |
181 | ||
182 | for_each_cpu_mask(cpu, cpu_possible_map) | |
183 | tracing_reset(tr, cpu); | |
184 | ||
ca2b84cb | 185 | kmemtrace_start_probes(); |
36994e58 FW |
186 | |
187 | return 0; | |
188 | } | |
189 | ||
/* Tracer reset callback: detach all probes; buffered events stay readable. */
static void kmem_trace_reset(struct trace_array *tr)
{
	kmemtrace_stop_probes();
}
194 | ||
195 | static void kmemtrace_headers(struct seq_file *s) | |
196 | { | |
197 | /* Don't need headers for the original kmemtrace output */ | |
198 | if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)) | |
199 | return; | |
200 | ||
201 | seq_printf(s, "#\n"); | |
202 | seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS " | |
203 | " POINTER NODE CALLER\n"); | |
204 | seq_printf(s, "# FREE | | | | " | |
205 | " | | | |\n"); | |
206 | seq_printf(s, "# |\n\n"); | |
207 | } | |
208 | ||
209 | /* | |
210 | * The two following functions give the original output from kmemtrace, | |
211 | * or something close to....perhaps they need some missing things | |
212 | */ | |
213 | static enum print_line_t | |
214 | kmemtrace_print_alloc_original(struct trace_iterator *iter, | |
215 | struct kmemtrace_alloc_entry *entry) | |
216 | { | |
217 | struct trace_seq *s = &iter->seq; | |
218 | int ret; | |
219 | ||
220 | /* Taken from the old linux/kmemtrace.h */ | |
221 | ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu " | |
222 | "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n", | |
223 | entry->type_id, entry->call_site, (unsigned long) entry->ptr, | |
224 | (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc, | |
225 | (unsigned long) entry->gfp_flags, entry->node); | |
226 | ||
227 | if (!ret) | |
228 | return TRACE_TYPE_PARTIAL_LINE; | |
229 | ||
230 | return TRACE_TYPE_HANDLED; | |
231 | } | |
232 | ||
233 | static enum print_line_t | |
234 | kmemtrace_print_free_original(struct trace_iterator *iter, | |
235 | struct kmemtrace_free_entry *entry) | |
236 | { | |
237 | struct trace_seq *s = &iter->seq; | |
238 | int ret; | |
239 | ||
240 | /* Taken from the old linux/kmemtrace.h */ | |
241 | ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n", | |
242 | entry->type_id, entry->call_site, (unsigned long) entry->ptr); | |
243 | ||
244 | if (!ret) | |
245 | return TRACE_TYPE_PARTIAL_LINE; | |
246 | ||
247 | return TRACE_TYPE_HANDLED; | |
248 | } | |
249 | ||
250 | ||
251 | /* The two other following provide a more minimalistic output */ | |
252 | static enum print_line_t | |
253 | kmemtrace_print_alloc_compress(struct trace_iterator *iter, | |
254 | struct kmemtrace_alloc_entry *entry) | |
255 | { | |
256 | struct trace_seq *s = &iter->seq; | |
257 | int ret; | |
258 | ||
259 | /* Alloc entry */ | |
260 | ret = trace_seq_printf(s, " + "); | |
261 | if (!ret) | |
262 | return TRACE_TYPE_PARTIAL_LINE; | |
263 | ||
264 | /* Type */ | |
265 | switch (entry->type_id) { | |
266 | case KMEMTRACE_TYPE_KMALLOC: | |
267 | ret = trace_seq_printf(s, "K "); | |
268 | break; | |
269 | case KMEMTRACE_TYPE_CACHE: | |
270 | ret = trace_seq_printf(s, "C "); | |
271 | break; | |
272 | case KMEMTRACE_TYPE_PAGES: | |
273 | ret = trace_seq_printf(s, "P "); | |
274 | break; | |
275 | default: | |
276 | ret = trace_seq_printf(s, "? "); | |
277 | } | |
278 | ||
279 | if (!ret) | |
280 | return TRACE_TYPE_PARTIAL_LINE; | |
281 | ||
282 | /* Requested */ | |
ecf441b5 | 283 | ret = trace_seq_printf(s, "%4zu ", entry->bytes_req); |
36994e58 FW |
284 | if (!ret) |
285 | return TRACE_TYPE_PARTIAL_LINE; | |
286 | ||
287 | /* Allocated */ | |
ecf441b5 | 288 | ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc); |
36994e58 FW |
289 | if (!ret) |
290 | return TRACE_TYPE_PARTIAL_LINE; | |
291 | ||
292 | /* Flags | |
293 | * TODO: would be better to see the name of the GFP flag names | |
294 | */ | |
295 | ret = trace_seq_printf(s, "%08x ", entry->gfp_flags); | |
296 | if (!ret) | |
297 | return TRACE_TYPE_PARTIAL_LINE; | |
298 | ||
299 | /* Pointer to allocated */ | |
300 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | |
301 | if (!ret) | |
302 | return TRACE_TYPE_PARTIAL_LINE; | |
303 | ||
304 | /* Node */ | |
305 | ret = trace_seq_printf(s, "%4d ", entry->node); | |
306 | if (!ret) | |
307 | return TRACE_TYPE_PARTIAL_LINE; | |
308 | ||
309 | /* Call site */ | |
310 | ret = seq_print_ip_sym(s, entry->call_site, 0); | |
311 | if (!ret) | |
312 | return TRACE_TYPE_PARTIAL_LINE; | |
313 | ||
314 | if (!trace_seq_printf(s, "\n")) | |
315 | return TRACE_TYPE_PARTIAL_LINE; | |
316 | ||
317 | return TRACE_TYPE_HANDLED; | |
318 | } | |
319 | ||
320 | static enum print_line_t | |
321 | kmemtrace_print_free_compress(struct trace_iterator *iter, | |
322 | struct kmemtrace_free_entry *entry) | |
323 | { | |
324 | struct trace_seq *s = &iter->seq; | |
325 | int ret; | |
326 | ||
327 | /* Free entry */ | |
328 | ret = trace_seq_printf(s, " - "); | |
329 | if (!ret) | |
330 | return TRACE_TYPE_PARTIAL_LINE; | |
331 | ||
332 | /* Type */ | |
333 | switch (entry->type_id) { | |
334 | case KMEMTRACE_TYPE_KMALLOC: | |
335 | ret = trace_seq_printf(s, "K "); | |
336 | break; | |
337 | case KMEMTRACE_TYPE_CACHE: | |
338 | ret = trace_seq_printf(s, "C "); | |
339 | break; | |
340 | case KMEMTRACE_TYPE_PAGES: | |
341 | ret = trace_seq_printf(s, "P "); | |
342 | break; | |
343 | default: | |
344 | ret = trace_seq_printf(s, "? "); | |
345 | } | |
346 | ||
347 | if (!ret) | |
348 | return TRACE_TYPE_PARTIAL_LINE; | |
349 | ||
350 | /* Skip requested/allocated/flags */ | |
351 | ret = trace_seq_printf(s, " "); | |
352 | if (!ret) | |
353 | return TRACE_TYPE_PARTIAL_LINE; | |
354 | ||
355 | /* Pointer to allocated */ | |
356 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | |
357 | if (!ret) | |
358 | return TRACE_TYPE_PARTIAL_LINE; | |
359 | ||
360 | /* Skip node */ | |
361 | ret = trace_seq_printf(s, " "); | |
362 | if (!ret) | |
363 | return TRACE_TYPE_PARTIAL_LINE; | |
364 | ||
365 | /* Call site */ | |
366 | ret = seq_print_ip_sym(s, entry->call_site, 0); | |
367 | if (!ret) | |
368 | return TRACE_TYPE_PARTIAL_LINE; | |
369 | ||
370 | if (!trace_seq_printf(s, "\n")) | |
371 | return TRACE_TYPE_PARTIAL_LINE; | |
372 | ||
373 | return TRACE_TYPE_HANDLED; | |
374 | } | |
375 | ||
376 | static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter) | |
377 | { | |
378 | struct trace_entry *entry = iter->ent; | |
379 | ||
380 | switch (entry->type) { | |
381 | case TRACE_KMEM_ALLOC: { | |
382 | struct kmemtrace_alloc_entry *field; | |
383 | trace_assign_type(field, entry); | |
384 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) | |
385 | return kmemtrace_print_alloc_compress(iter, field); | |
386 | else | |
387 | return kmemtrace_print_alloc_original(iter, field); | |
388 | } | |
389 | ||
390 | case TRACE_KMEM_FREE: { | |
391 | struct kmemtrace_free_entry *field; | |
392 | trace_assign_type(field, entry); | |
393 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) | |
394 | return kmemtrace_print_free_compress(iter, field); | |
395 | else | |
396 | return kmemtrace_print_free_original(iter, field); | |
397 | } | |
398 | ||
399 | default: | |
400 | return TRACE_TYPE_UNHANDLED; | |
401 | } | |
402 | } | |
403 | ||
/* Tracer descriptor registered with the ftrace core. */
static struct tracer kmem_tracer __read_mostly = {
	.name		= "kmemtrace",
	.init		= kmem_trace_init,	/* attach probes, reset buffers */
	.reset		= kmem_trace_reset,	/* detach probes */
	.print_line	= kmemtrace_print_line,	/* per-entry formatter */
	.print_header	= kmemtrace_headers,	/* minimal-mode column header */
	.flags		= &kmem_tracer_flags
};
412 | ||
/*
 * Hook called from early boot code; intentionally empty for now.
 */
void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}
417 | ||
/* Register the kmemtrace tracer with the ftrace core at device init time. */
static int __init init_kmem_tracer(void)
{
	return register_tracer(&kmem_tracer);
}

device_initcall(init_kmem_tracer);