/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
#include "util/pmu.h"
#include "../../include/linux/hw_breakpoint.h"

#include <sys/mman.h>

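/*
 * Symbol filter used while loading vmlinux: mark each symbol as visited in
 * the per-symbol private area (sized via symbol_conf.priv_size in
 * cmd_test() below).
 */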
static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}

static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	long page_size = sysconf(_SC_PAGE_SIZE);
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold the kernel and the modules, obtained
	 * from both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("dso__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally sorted by name on demand so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated, by checking if it has
	 * the same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("dso__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that, look for the ref reloc symbol: if we find it we'll
	 * have its ref_reloc_symbol.unrelocated_addr, and then
	 * maps__reloc_vmlinux will notice and set the proper ->[un]map_ip
	 * routines to fix up the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all
	 * of them in the kallsyms dso. For the ones that are in both, check
	 * their names and end addresses too.
	 */
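	/*
	 * Note: kallsyms can have several symbols (aliases) at the same
	 * address, so when the name of the pair found by address doesn't
	 * match, the detour below scans its rb-tree neighbours, first
	 * backwards and then forwards, looking for one that does.
	 */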
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms doesn't have the symbol end, so we
				 * set that by using the next symbol start - 1.
				 * In some cases we get this up to a page
				 * wrong; trace_kmalloc, when I was developing
				 * this code, was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}

	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
		 * while the vmlinux map will have the path of the vmlinux file
		 * being used, so use the short name, less descriptive but the
		 * same ("[kernel]") in both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>

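/*
 * Look up the id of a syscall tracepoint by reading
 * <tracing_events_path>/syscalls/<evname>/id, e.g.
 * /sys/kernel/debug/tracing/events/syscalls/sys_enter_open/id on a typical
 * debugfs mount. Returns the id or -1 if it couldn't be read.
 */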
static int trace_event__id(const char *evname)
{
	char *filename;
	int err = -1, fd;

	if (asprintf(&filename,
		     "%s/syscalls/%s/id",
		     tracing_events_path, evname) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}

static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

#include <sched.h>

static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror(errno));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start with cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

/*
 * This test will generate random numbers of calls to some getpid-like
 * syscalls, then establish an mmap for a group of events that are created
 * to monitor these syscalls.
 *
 * It will receive the events, using mmap, then use the PERF_SAMPLE_ID
 * generated sample.id field to map each sample back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_TRACEPOINT,
		.read_format = PERF_FORMAT_ID,
		.sample_type = PERF_SAMPLE_ID,
		.watermark = 0,
	};
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	int ids[nsyscalls];
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		ids[i] = trace_event__id(name);
		if (ids[i] < 0) {
			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
			return -1;
		}
		nr_events[i] = 0;
		expected_nr_events[i] = random() % 257;
	}

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	/* anonymous union fields, can't be initialized above */
	attr.wakeup_events = 1;
	attr.sample_period = 1;

	for (i = 0; i < nsyscalls; ++i) {
		attr.config = ids[i];
		evsels[i] = perf_evsel__new(&attr, i);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_free_evlist;
		}

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_close_fd;
		}
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_fd;
	}

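	/*
	 * The return value is stored and incremented presumably just so the
	 * syscall's result is consumed and the call can't be optimized away.
	 */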
	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_munmap;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample, false);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_munmap;
		}

		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_munmap;
		}
		nr_events[evsel->idx]++;
	}

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			goto out_munmap;
		}
	}

	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_fd:
	for (i = 0; i < nsyscalls; ++i)
		perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
	perf_evlist__delete(evlist);
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
#undef nsyscalls
}

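/*
 * Find the first CPU in @pid's affinity mask and narrow the mask down to just
 * that CPU, growing the dynamically sized cpu_set_t until sched_getaffinity()
 * stops failing with EINVAL (up to 1024 << 8 CPUs). On success the caller
 * owns *maskp and must CPU_FREE() it.
 */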
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
					 size_t *sizep)
{
	cpu_set_t *mask;
	size_t size;
	int i, cpu = -1, nrcpus = 1024;
realloc:
	mask = CPU_ALLOC(nrcpus);
	size = CPU_ALLOC_SIZE(nrcpus);
	CPU_ZERO_S(size, mask);

	if (sched_getaffinity(pid, size, mask) == -1) {
		CPU_FREE(mask);
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET_S(i, size, mask)) {
			if (cpu == -1) {
				cpu = i;
				*maskp = mask;
				*sizep = size;
			} else
				CPU_CLR_S(i, size, mask);
		}
	}

	if (cpu == -1)
		CPU_FREE(mask);

	return cpu;
}

static int test__PERF_RECORD(void)
{
	struct perf_record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_delay = true,
		.freq = 10,
		.mmap_pages = 256,
	};
	cpu_set_t *cpu_mask = NULL;
	size_t cpu_mask_size = 0;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };

	if (evlist == NULL || argv == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * We need at least one evsel in the evlist, use the default
	 * one: "cycles".
	 */
	err = perf_evlist__add_default(evlist);
	if (err < 0) {
		pr_debug("Not enough memory to create evsel\n");
		goto out_delete_evlist;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts, argv);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
	evsel->attr.sample_type |= PERF_SAMPLE_TID;
	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
	perf_evlist__config_attrs(evlist, &opts);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
					    &cpu_mask_size);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n", strerror(errno));
		goto out_free_cpu_mask;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist, opts.group);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample, false);
				if (err < 0) {
					if (verbose)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_err;
				}

				if (verbose) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				    (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP) &&
				    event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					bname = strrchr(event->mmap.filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}
			}
		}

		/*
		 * We don't use poll() here because, at least as of 3.1, the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events; just PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			poll(evlist->pollfd, evlist->nr_fds, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_err:
	perf_evlist__munmap(evlist);
out_free_cpu_mask:
	CPU_FREE(cpu_mask);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}

#if defined(__x86_64__) || defined(__i386__)

#define barrier() asm volatile("" ::: "memory")

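/*
 * Read a hardware performance counter (the one selected in ECX) or the TSC
 * directly from user space; EDX:EAX hold the high and low halves of the
 * 64-bit result.
 */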
static u64 rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((u64)high) << 32;
}

static u64 rdtsc(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((u64)high) << 32;
}

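/*
 * Self-monitoring read via the perf_event mmap control page: retry
 * (seqlock style) until pc->lock is unchanged across the read, accumulate
 * the counter with rdpmc(pc->index - 1), and if the event wasn't running
 * the whole time, scale the count by enabled/running, extending both with
 * a delta computed from the TSC and pc->time_{offset,mult,shift}. This
 * mirrors the algorithm documented for perf_event_mmap_page in the kernel
 * ABI headers.
 */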
static u64 mmap_read_self(void *addr)
{
	struct perf_event_mmap_page *pc = addr;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 count, cyc = 0, time_offset = 0, enabled, running, delta;

	do {
		seq = pc->lock;
		barrier();

		enabled = pc->time_enabled;
		running = pc->time_running;

		if (enabled != running) {
			cyc = rdtsc();
			time_mult = pc->time_mult;
			time_shift = pc->time_shift;
			time_offset = pc->time_offset;
		}

		idx = pc->index;
		count = pc->offset;
		if (idx)
			count += rdpmc(idx - 1);

		barrier();
	} while (pc->lock != seq);

	if (enabled != running) {
		u64 quot, rem;

		quot = (cyc >> time_shift);
		rem = cyc & ((1 << time_shift) - 1);
		delta = time_offset + quot * time_mult +
			((rem * time_mult) >> time_shift);

		enabled += delta;
		if (idx)
			running += delta;

		quot = count / running;
		rem = count % running;
		count = quot * enabled + (rem * enabled) / running;
	}

	return count;
}

/*
 * If the RDPMC instruction faults then signal this back to the test parent task.
 */
static void segfault_handler(int sig __used, siginfo_t *info __used, void *uc __used)
{
	exit(-1);
}

static int __test__rdpmc(void)
{
	long page_size = sysconf(_SC_PAGE_SIZE);
	volatile int tmp = 0;
	u64 i, loops = 1000;
	int n;
	int fd;
	void *addr;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_INSTRUCTIONS,
		.exclude_kernel = 1,
	};
	u64 delta_sum = 0;
	struct sigaction sa;

	sigfillset(&sa.sa_mask);
	sa.sa_sigaction = segfault_handler;
	sa.sa_flags = SA_SIGINFO;	/* required for sa_sigaction to be used */
	sigaction(SIGSEGV, &sa, NULL);

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		die("Error: sys_perf_event_open() syscall returned "
		    "with %d (%s)\n", fd, strerror(errno));
	}

	addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == (void *)(-1)) {
		die("Error: mmap() syscall returned "
		    "with (%s)\n", strerror(errno));
	}

	for (n = 0; n < 6; n++) {
		u64 stamp, now, delta;

		stamp = mmap_read_self(addr);

		for (i = 0; i < loops; i++)
			tmp++;

		now = mmap_read_self(addr);
		loops *= 10;

		delta = now - stamp;
		pr_debug("%14d: %14Lu\n", n, (unsigned long long)delta);

		delta_sum += delta;
	}

	munmap(addr, page_size);
	close(fd);

	pr_debug(" ");

	if (!delta_sum)
		return -1;

	return 0;
}

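/*
 * Run __test__rdpmc() in a forked child so that a SIGSEGV from a faulting
 * RDPMC (see segfault_handler above) only takes down the child; the parent
 * turns the child's exit status into the test result.
 */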
static int test__rdpmc(void)
{
	int status = 0;
	int wret = 0;
	int ret;
	int pid;

	pid = fork();
	if (pid < 0)
		return -1;

	if (!pid) {
		ret = __test__rdpmc();

		exit(ret);
	}

	wret = waitpid(pid, &status, 0);
	if (wret < 0 || status)
		return -1;

	return 0;
}

#endif

static int test__perf_pmu(void)
{
	return perf_pmu__test();
}

static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = parse_events__test,
	},
#if defined(__x86_64__) || defined(__i386__)
	{
		.desc = "x86 rdpmc test",
		.func = test__rdpmc,
	},
#endif
	{
		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
		.func = test__PERF_RECORD,
	},
	{
		.desc = "Test perf pmu format parsing",
		.func = test__perf_pmu,
	},
	{
		.desc = "Test dso data interface",
		.func = dso__test_data,
	},
	{
		.func = NULL,
	},
};

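/*
 * A test runs when no selector is given, when one of the arguments is its
 * 1-based number, or when one of the arguments is a substring of its
 * description.
 */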
static bool perf_test__matches(int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == curr + 1)
				return true;
			continue;
		}

		if (strstr(tests[curr].desc, argv[i]))
			return true;
	}

	return false;
}

static int __cmd_test(int argc, const char *argv[])
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++, err;

		if (!perf_test__matches(curr, argc, argv))
			continue;

		pr_info("%2d: %s:", i, tests[curr].desc);
		pr_debug("\n--- start ---\n");
		err = tests[curr].func();
		pr_debug("---- end ----\n%s:", tests[curr].desc);
		pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
	}

	return 0;
}

static int perf_test__list(int argc, const char **argv)
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++;

		if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
			continue;

		pr_info("%2d: %s\n", i, tests[curr].desc);
	}

	return 0;
}

int cmd_test(int argc, const char **argv, const char *prefix __used)
{
	const char * const test_usage[] = {
		"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
		NULL,
	};
	const struct option test_options[] = {
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show symbol address, etc)"),
		OPT_END()
	};

	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list"))
		return perf_test__list(argc, argv);

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	return __cmd_test(argc, argv);
}