Commit | Line | Data |
---|---|---|
94c744b6 ACM |
1 | #include <linux/kernel.h> |
2 | ||
3 | #include <unistd.h> | |
4 | #include <sys/types.h> | |
5 | ||
6 | #include "session.h" | |
a328626b | 7 | #include "sort.h" |
94c744b6 ACM |
8 | #include "util.h" |
9 | ||
10 | static int perf_session__open(struct perf_session *self, bool force) | |
11 | { | |
12 | struct stat input_stat; | |
13 | ||
14 | self->fd = open(self->filename, O_RDONLY); | |
15 | if (self->fd < 0) { | |
16 | pr_err("failed to open file: %s", self->filename); | |
17 | if (!strcmp(self->filename, "perf.data")) | |
18 | pr_err(" (try 'perf record' first)"); | |
19 | pr_err("\n"); | |
20 | return -errno; | |
21 | } | |
22 | ||
23 | if (fstat(self->fd, &input_stat) < 0) | |
24 | goto out_close; | |
25 | ||
26 | if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { | |
27 | pr_err("file %s not owned by current user or root\n", | |
28 | self->filename); | |
29 | goto out_close; | |
30 | } | |
31 | ||
32 | if (!input_stat.st_size) { | |
33 | pr_info("zero-sized file (%s), nothing to do!\n", | |
34 | self->filename); | |
35 | goto out_close; | |
36 | } | |
37 | ||
38 | if (perf_header__read(&self->header, self->fd) < 0) { | |
39 | pr_err("incompatible file format"); | |
40 | goto out_close; | |
41 | } | |
42 | ||
43 | self->size = input_stat.st_size; | |
44 | return 0; | |
45 | ||
46 | out_close: | |
47 | close(self->fd); | |
48 | self->fd = -1; | |
49 | return -1; | |
50 | } | |
51 | ||
75be6cf4 | 52 | struct perf_session *perf_session__new(const char *filename, int mode, bool force) |
94c744b6 | 53 | { |
b3165f41 | 54 | size_t len = filename ? strlen(filename) + 1 : 0; |
94c744b6 ACM |
55 | struct perf_session *self = zalloc(sizeof(*self) + len); |
56 | ||
57 | if (self == NULL) | |
58 | goto out; | |
59 | ||
60 | if (perf_header__init(&self->header) < 0) | |
4aa65636 | 61 | goto out_free; |
94c744b6 ACM |
62 | |
63 | memcpy(self->filename, filename, len); | |
b3165f41 ACM |
64 | self->threads = RB_ROOT; |
65 | self->last_match = NULL; | |
ec913369 ACM |
66 | self->mmap_window = 32; |
67 | self->cwd = NULL; | |
68 | self->cwdlen = 0; | |
4aa65636 | 69 | map_groups__init(&self->kmaps); |
94c744b6 | 70 | |
75be6cf4 | 71 | if (perf_session__create_kernel_maps(self) < 0) |
4aa65636 ACM |
72 | goto out_delete; |
73 | ||
74 | if (mode == O_RDONLY && perf_session__open(self, force) < 0) | |
75 | goto out_delete; | |
94c744b6 ACM |
76 | out: |
77 | return self; | |
4aa65636 | 78 | out_free: |
94c744b6 ACM |
79 | free(self); |
80 | return NULL; | |
4aa65636 ACM |
81 | out_delete: |
82 | perf_session__delete(self); | |
83 | return NULL; | |
94c744b6 ACM |
84 | } |
85 | ||
/*
 * Tear down a session created by perf_session__new(): release header
 * resources, the perf.data fd, the cached cwd string, and the session
 * itself. Order matters: self must be freed last.
 *
 * NOTE(review): close() runs unconditionally; if perf_session__open()
 * was never called the fd is still the zalloc'd 0 and this closes
 * stdin — confirm no caller deletes an unopened session.
 */
void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}
a328626b ACM |
93 | |
94 | static bool symbol__match_parent_regex(struct symbol *sym) | |
95 | { | |
96 | if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) | |
97 | return 1; | |
98 | ||
99 | return 0; | |
100 | } | |
101 | ||
/*
 * Resolve every IP in @chain to a symbol for @thread.
 *
 * Returns a calloc'd array of chain->nr symbol pointers (caller frees)
 * when callchain recording is enabled, NULL otherwise. Entries that did
 * not resolve stay NULL. Independently of the return value, the first
 * symbol matching parent_regex (when sort__has_parent) is stored in
 * *parent.
 */
struct symbol **perf_session__resolve_callchain(struct perf_session *self,
						struct thread *thread,
						struct ip_callchain *chain,
						struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct symbol **syms = NULL;
	unsigned int i;

	if (symbol_conf.use_callchain) {
		syms = calloc(chain->nr, sizeof(*syms));
		if (!syms) {
			fprintf(stderr, "Can't allocate memory for symbols\n");
			exit(-1);
		}
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		/*
		 * Values above PERF_CONTEXT_MAX are not addresses but
		 * context markers: they switch the cpumode used to resolve
		 * all subsequent IPs, and produce no symbol themselves.
		 */
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			/*
			 * Without callchain output there is nothing more to
			 * collect once the parent has been looked at.
			 */
			if (!symbol_conf.use_callchain)
				break;
			syms[i] = al.sym;
		}
	}

	return syms;
}
06aae590 ACM |
151 | |
/*
 * Default no-op handler installed by perf_event_ops__fill_defaults()
 * for any callback the tool left NULL: log the event as unhandled in
 * debug output and report success.
 */
static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}
158 | ||
159 | static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) | |
160 | { | |
161 | if (handler->process_sample_event == NULL) | |
162 | handler->process_sample_event = process_event_stub; | |
163 | if (handler->process_mmap_event == NULL) | |
164 | handler->process_mmap_event = process_event_stub; | |
165 | if (handler->process_comm_event == NULL) | |
166 | handler->process_comm_event = process_event_stub; | |
167 | if (handler->process_fork_event == NULL) | |
168 | handler->process_fork_event = process_event_stub; | |
169 | if (handler->process_exit_event == NULL) | |
170 | handler->process_exit_event = process_event_stub; | |
171 | if (handler->process_lost_event == NULL) | |
172 | handler->process_lost_event = process_event_stub; | |
173 | if (handler->process_read_event == NULL) | |
174 | handler->process_read_event = process_event_stub; | |
175 | if (handler->process_throttle_event == NULL) | |
176 | handler->process_throttle_event = process_event_stub; | |
177 | if (handler->process_unthrottle_event == NULL) | |
178 | handler->process_unthrottle_event = process_event_stub; | |
179 | } | |
180 | ||
/*
 * Human-readable names for PERF_RECORD_* event types, indexed by type;
 * slot 0 is the synthetic grand-total bucket used by event__total[0].
 */
static const char *event__name[] = {
	[0] = "TOTAL",
	[PERF_RECORD_MMAP] = "MMAP",
	[PERF_RECORD_LOST] = "LOST",
	[PERF_RECORD_COMM] = "COMM",
	[PERF_RECORD_EXIT] = "EXIT",
	[PERF_RECORD_THROTTLE] = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK] = "FORK",
	[PERF_RECORD_READ] = "READ",
	[PERF_RECORD_SAMPLE] = "SAMPLE",
};
193 | ||
194 | unsigned long event__total[PERF_RECORD_MAX]; | |
195 | ||
196 | void event__print_totals(void) | |
197 | { | |
198 | int i; | |
199 | for (i = 0; i < PERF_RECORD_MAX; ++i) | |
200 | pr_info("%10s events: %10ld\n", | |
201 | event__name[i], event__total[i]); | |
202 | } | |
203 | ||
/*
 * Account one event (total + per-type counters, debug dump) and
 * dispatch it to the matching ops callback. The callbacks are assumed
 * non-NULL (perf_event_ops__fill_defaults() must have run).
 *
 * @offset/@head locate the event in the file, for debug output only.
 * Returns the callback's result, or -1 for an unknown event type
 * (also bumping ops->total_unknown).
 */
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       unsigned long offset, unsigned long head)
{
	trace_event(event);

	/* Only known types are counted; unknown ones fall to the default arm. */
	if (event->header.type < PERF_RECORD_MAX) {
		dump_printf("%p [%p]: PERF_RECORD_%s",
			    (void *)(offset + head),
			    (void *)(long)(event->header.size),
			    event__name[event->header.type]);
		++event__total[0];	/* slot 0 = grand total */
		++event__total[event->header.type];
	}

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return ops->process_sample_event(event, self);
	case PERF_RECORD_MMAP:
		return ops->process_mmap_event(event, self);
	case PERF_RECORD_COMM:
		return ops->process_comm_event(event, self);
	case PERF_RECORD_FORK:
		return ops->process_fork_event(event, self);
	case PERF_RECORD_EXIT:
		return ops->process_exit_event(event, self);
	case PERF_RECORD_LOST:
		return ops->process_lost_event(event, self);
	case PERF_RECORD_READ:
		return ops->process_read_event(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->process_throttle_event(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->process_unthrottle_event(event, self);
	default:
		ops->total_unknown++;
		return -1;
	}
}
244 | ||
245 | int perf_header__read_build_ids(int input, u64 offset, u64 size) | |
246 | { | |
247 | struct build_id_event bev; | |
248 | char filename[PATH_MAX]; | |
249 | u64 limit = offset + size; | |
250 | int err = -1; | |
251 | ||
252 | while (offset < limit) { | |
253 | struct dso *dso; | |
254 | ssize_t len; | |
255 | ||
256 | if (read(input, &bev, sizeof(bev)) != sizeof(bev)) | |
257 | goto out; | |
258 | ||
259 | len = bev.header.size - sizeof(bev); | |
260 | if (read(input, filename, len) != len) | |
261 | goto out; | |
262 | ||
263 | dso = dsos__findnew(filename); | |
264 | if (dso != NULL) | |
265 | dso__set_build_id(dso, &bev.build_id); | |
266 | ||
267 | offset += bev.header.size; | |
268 | } | |
269 | err = 0; | |
270 | out: | |
271 | return err; | |
272 | } | |
273 | ||
/*
 * Ensure the session has a thread object for PID 0 and name it
 * "swapper" (the idle task), so events with no user task still
 * resolve. Returns the thread, or NULL on failure.
 */
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *idle = perf_session__findnew(self, 0);

	if (idle == NULL)
		goto err;
	if (thread__set_comm(idle, "swapper") != 0)
		goto err;

	return idle;
err:
	pr_err("problem inserting idle task.\n");
	return NULL;
}
285 | ||
/*
 * Main event loop: mmap the perf.data file in self->mmap_window pages
 * at a time and feed every event record to perf_session__process_event().
 *
 * Returns 0 on success or a negative errno-style code. Also caches the
 * current working directory in self->cwd (unless ops->full_paths) and
 * registers the idle thread before processing starts.
 */
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;
	unsigned long head, shift;
	unsigned long offset = 0;	/* page-aligned file offset of the current mapping */
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	perf_event_ops__fill_defaults(ops);

	page_size = getpagesize();

	head = self->header.data_offset;
	self->sample_type = perf_header__sample_type(&self->header);

	err = -EINVAL;
	if (ops->sample_type_check && ops->sample_type_check(self) < 0)
		goto out_err;

	if (!ops->full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
			/* label inside the if-body: also reached from the
			 * strdup failure below, with err already set */
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	/* split data_offset into a page-aligned mmap offset plus head */
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

remap:
	buf = mmap(NULL, page_size * self->mmap_window, PROT_READ,
		   MAP_SHARED, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (size == 0)
		size = 8;	/* minimum record size, keeps us moving */

	/*
	 * If the record would run past the mapped window, slide the
	 * window forward to the page containing head and remap.
	 */
	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%p [%p]: event: %d\n",
		    (void *)(offset + head),
		    (void *)(long)event->header.size,
		    event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%p [%p]: skipping unknown header type: %d\n",
			    (void *)(offset + head),
			    (void *)(long)(event->header.size),
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	/* stop at the end of the data section... */
	if (offset + head >= self->header.data_offset + self->header.data_size)
		goto done;

	/* ...or at the end of the file, whichever comes first */
	if (offset + head < self->size)
		goto more;
done:
	err = 0;
out_err:
	return err;
}