Merge branch 'core/locking' into perfcounters/core
[deliverable/linux.git] / Documentation / perf_counter / builtin-top.c
1 /*
2 * kerneltop.c: show top kernel functions - performance counters showcase
3
4 Build with:
5
6 make -C Documentation/perf_counter/
7
8 Sample output:
9
10 ------------------------------------------------------------------------------
11 KernelTop: 2669 irqs/sec [NMI, cache-misses/cache-refs], (all, cpu: 2)
12 ------------------------------------------------------------------------------
13
14 weight RIP kernel function
15 ______ ________________ _______________
16
17 35.20 - ffffffff804ce74b : skb_copy_and_csum_dev
18 33.00 - ffffffff804cb740 : sock_alloc_send_skb
19 31.26 - ffffffff804ce808 : skb_push
20 22.43 - ffffffff80510004 : tcp_established_options
21 19.00 - ffffffff8027d250 : find_get_page
22 15.76 - ffffffff804e4fc9 : eth_type_trans
23 15.20 - ffffffff804d8baa : dst_release
24 14.86 - ffffffff804cf5d8 : skb_release_head_state
25 14.00 - ffffffff802217d5 : read_hpet
26 12.00 - ffffffff804ffb7f : __ip_local_out
27 11.97 - ffffffff804fc0c8 : ip_local_deliver_finish
28 8.54 - ffffffff805001a3 : ip_queue_xmit
29 */
30
31 /*
32 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
33 *
34 * Improvements and fixes by:
35 *
36 * Arjan van de Ven <arjan@linux.intel.com>
37 * Yanmin Zhang <yanmin.zhang@intel.com>
38 * Wu Fengguang <fengguang.wu@intel.com>
39 * Mike Galbraith <efault@gmx.de>
40 * Paul Mackerras <paulus@samba.org>
41 *
42 * Released under the GPL v2. (and only v2, not any later version)
43 */
44
45 #include "util/util.h"
46
47 #include <getopt.h>
48 #include <assert.h>
49 #include <fcntl.h>
50 #include <stdio.h>
51 #include <errno.h>
52 #include <time.h>
53 #include <sched.h>
54 #include <pthread.h>
55
56 #include <sys/syscall.h>
57 #include <sys/ioctl.h>
58 #include <sys/poll.h>
59 #include <sys/prctl.h>
60 #include <sys/wait.h>
61 #include <sys/uio.h>
62 #include <sys/mman.h>
63
64 #include <linux/unistd.h>
65 #include <linux/types.h>
66
67 #include "../../include/linux/perf_counter.h"
68
69 #include "perf.h"
70
/* Profile every CPU (-a) instead of a single task/CPU. */
static int system_wide = 0;

/* Number of events selected via -e; 0 means "use the default mix below". */
static int nr_counters = 0;
/* Default event mix; EID() packs (type, id) into a single config word. */
static __u64 event_id[MAX_COUNTERS] = {
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),
	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),

	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),
	EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),
};
static int default_interval = 100000;		/* sampling period (-c) */
static int event_count[MAX_COUNTERS];		/* per-event sampling period */
static int fd[MAX_NR_CPUS][MAX_COUNTERS];	/* perf counter fds, per cpu/event */

/* Minimum hit count for a symbol/line to be displayed (-f). */
static __u64 count_filter = 100;

static int tid = -1;			/* task to profile (-p), -1 == all */
static int profile_cpu = -1;		/* CPU to profile (-C), -1 == all */
static int nr_cpus = 0;
static int nmi = 1;			/* sample from NMI context (-n) */
static unsigned int realtime_prio = 0;	/* SCHED_FIFO priority (-r), 0 == off */
static int group = 0;			/* group counters under one leader (-g) */
static unsigned int page_size;
static unsigned int mmap_pages = 16;	/* ring-buffer data size in pages (-m) */
static int use_mmap = 0;		/* print mmap info stream (-M) */
static int use_munmap = 0;		/* print munmap info stream (-U) */

static char *vmlinux;			/* vmlinux path for -s annotation (-x) */

static char *sym_filter;		/* symbol to annotate one-shot (-s) */
static unsigned long filter_start;	/* address range of sym_filter, */
static unsigned long filter_end;	/* filled in by read_symbol() */

static int delay_secs = 2;		/* display refresh period (-d) */
static int zero;			/* zero counts after display (-z) */
static int dump_symtab;			/* dump symbol table on startup (-D) */

static int scale;			/* show scale factors (-l) */

/* One line of objdump -dS output for the annotated symbol. */
struct source_line {
	uint64_t EIP;			/* address parsed from the line, 0 if none */
	unsigned long count;		/* samples that hit this exact line */
	char *line;			/* the text itself (getline-allocated) */
	struct source_line *next;	/* singly linked, in objdump output order */
};

static struct source_line *lines;	/* head of the parsed objdump output */
static struct source_line **lines_tail;	/* tail pointer for O(1) append */

/* Legacy per-event default periods.
 * NOTE(review): appears unused in this file - verify before removing. */
static const unsigned int default_count[] = {
	1000000,
	1000000,
	10000,
	10000,
	1000000,
	10000,
};
132
/* Display names for hardware events, indexed by PERF_COUNT_* id. */
static char *hw_event_names[] = {
	"CPU cycles",
	"instructions",
	"cache references",
	"cache misses",
	"branches",
	"branch misses",
	"bus cycles",
};

/* Display names for software events, indexed by PERF_COUNT_* id. */
static char *sw_event_names[] = {
	"cpu clock ticks",
	"task clock ticks",
	"pagefaults",
	"context switches",
	"CPU migrations",
	"minor faults",
	"major faults",
};

/* Maps a -e command-line name to its packed event config word. */
struct event_symbol {
	__u64 event;
	char *symbol;
};

/*
 * Symbolic -e event names; several aliases may map to the same event
 * (matching is by prefix, see match_event_symbols()).
 */
static struct event_symbol event_symbols[] = {
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", },
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", },
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", },
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", },
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", },
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", },
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", },
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", },
	{EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", },

	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", },
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", },
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", },
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", },
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", },
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", },
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", },
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", },
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", },
	{EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", },
};
180
/* Extract one bit-field from a packed event config word. */
#define __PERF_COUNTER_FIELD(config, name) \
	((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW(config)	__PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config)	__PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)	__PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)		__PERF_COUNTER_FIELD(config, EVENT)
188
189 static void display_events_help(void)
190 {
191 unsigned int i;
192 __u64 e;
193
194 printf(
195 " -e EVENT --event=EVENT # symbolic-name abbreviations");
196
197 for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
198 int type, id;
199
200 e = event_symbols[i].event;
201 type = PERF_COUNTER_TYPE(e);
202 id = PERF_COUNTER_ID(e);
203
204 printf("\n %d:%d: %-20s",
205 type, id, event_symbols[i].symbol);
206 }
207
208 printf("\n"
209 " rNNN: raw PMU events (eventsel+umask)\n\n");
210 }
211
212 static void display_help(void)
213 {
214 printf(
215 "Usage: kerneltop [<options>]\n"
216 " Or: kerneltop -S [<options>] COMMAND [ARGS]\n\n"
217 "KernelTop Options (up to %d event types can be specified at once):\n\n",
218 MAX_COUNTERS);
219
220 display_events_help();
221
222 printf(
223 " -c CNT --count=CNT # event period to sample\n\n"
224 " -C CPU --cpu=CPU # CPU (-1 for all) [default: -1]\n"
225 " -p PID --pid=PID # PID of sampled task (-1 for all) [default: -1]\n\n"
226 " -l # show scale factor for RR events\n"
227 " -d delay --delay=<seconds> # sampling/display delay [default: 2]\n"
228 " -f CNT --filter=CNT # min-event-count filter [default: 100]\n\n"
229 " -r prio --realtime=<prio> # event acquisition runs with SCHED_FIFO policy\n"
230 " -s symbol --symbol=<symbol> # function to be showed annotated one-shot\n"
231 " -x path --vmlinux=<path> # the vmlinux binary, required for -s use\n"
232 " -z --zero # zero counts after display\n"
233 " -D --dump_symtab # dump symbol table to stderr on startup\n"
234 " -m pages --mmap_pages=<pages> # number of mmap data pages\n"
235 " -M --mmap_info # print mmap info stream\n"
236 " -U --munmap_info # print munmap info stream\n"
237 );
238
239 exit(0);
240 }
241
242 static char *event_name(int ctr)
243 {
244 __u64 config = event_id[ctr];
245 int type = PERF_COUNTER_TYPE(config);
246 int id = PERF_COUNTER_ID(config);
247 static char buf[32];
248
249 if (PERF_COUNTER_RAW(config)) {
250 sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config));
251 return buf;
252 }
253
254 switch (type) {
255 case PERF_TYPE_HARDWARE:
256 if (id < PERF_HW_EVENTS_MAX)
257 return hw_event_names[id];
258 return "unknown-hardware";
259
260 case PERF_TYPE_SOFTWARE:
261 if (id < PERF_SW_EVENTS_MAX)
262 return sw_event_names[id];
263 return "unknown-software";
264
265 default:
266 break;
267 }
268
269 return "unknown";
270 }
271
272 /*
273 * Each event can have multiple symbolic names.
274 * Symbolic names are (almost) exactly matched.
275 */
276 static __u64 match_event_symbols(char *str)
277 {
278 __u64 config, id;
279 int type;
280 unsigned int i;
281
282 if (sscanf(str, "r%llx", &config) == 1)
283 return config | PERF_COUNTER_RAW_MASK;
284
285 if (sscanf(str, "%d:%llu", &type, &id) == 2)
286 return EID(type, id);
287
288 for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
289 if (!strncmp(str, event_symbols[i].symbol,
290 strlen(event_symbols[i].symbol)))
291 return event_symbols[i].event;
292 }
293
294 return ~0ULL;
295 }
296
297 static int parse_events(char *str)
298 {
299 __u64 config;
300
301 again:
302 if (nr_counters == MAX_COUNTERS)
303 return -1;
304
305 config = match_event_symbols(str);
306 if (config == ~0ULL)
307 return -1;
308
309 event_id[nr_counters] = config;
310 nr_counters++;
311
312 str = strstr(str, ",");
313 if (str) {
314 str++;
315 goto again;
316 }
317
318 return 0;
319 }
320
/*
 * Symbols
 */

/* Kernel text address range covered by sym_table[]. */
static uint64_t min_ip;
static uint64_t max_ip = -1ll;

/* One histogram entry per kernel symbol. */
struct sym_entry {
	unsigned long long addr;		/* start address of the symbol */
	char *sym;				/* name, malloc'ed by read_symbol() */
	unsigned long count[MAX_COUNTERS];	/* decayed hit counts per counter */
	int skip;				/* true for idle-loop symbols */
	struct source_line *source;		/* annotation anchor, bound lazily */
};

#define MAX_SYMS 100000

static int sym_table_count;

/* Entry matching the --symbol annotation filter, if any. */
struct sym_entry *sym_filter_entry;

static struct sym_entry sym_table[MAX_SYMS];

static void show_details(struct sym_entry *sym);
345
/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym)
{
	double weight;
	int counter;

	weight = sym->count[0];

	/* Multiply in all but the last active counter... */
	for (counter = 1; counter < nr_counters-1; counter++)
		weight *= sym->count[counter];

	/*
	 * ...and divide by the last one; +1 avoids division by zero.
	 * With nr_counters == 1 the loop does not run and the divisor
	 * is count[1]+1; count[1] is never incremented in that
	 * configuration (record_ip() only touches counters below
	 * nr_counters), so the weight degenerates to count[0].
	 */
	weight /= (sym->count[counter] + 1);

	return weight;
}
363
/*
 * qsort() comparator: sort symbols by descending weight.
 *
 * The previous "w1 < w2" expression only ever returned 0 or 1 and
 * therefore was not a consistent three-way ordering, which qsort()
 * requires; return the full negative/zero/positive result instead.
 */
static int compare(const void *__sym1, const void *__sym2)
{
	const struct sym_entry *sym1 = __sym1, *sym2 = __sym2;
	double w1 = sym_weight(sym1), w2 = sym_weight(sym2);

	if (w1 > w2)
		return -1;
	if (w1 < w2)
		return 1;
	return 0;
}
370
static long events;			/* samples seen this refresh interval */
static long userspace_events;		/* samples outside the kernel text range */
static const char CONSOLE_CLEAR[] = "\e[H\e[2J";	/* ANSI cursor-home + clear */

/* Sort scratch copy, so sorting does not disturb the live histogram. */
static struct sym_entry tmp[MAX_SYMS];
376
/*
 * Render one refresh of the top display: header with event rates and
 * counter names, then the hottest symbols sorted by weight, then the
 * optional --symbol annotation.  Runs on the display thread; also
 * exits the program if a key was pressed on stdin.
 */
static void print_sym_table(void)
{
	int i, printed;
	int counter;
	/* Per-second rates over the interval that just ended. */
	float events_per_sec = events/delay_secs;
	float kevents_per_sec = (events-userspace_events)/delay_secs;
	float sum_kevents = 0.0;

	events = userspace_events = 0;
	/* Snapshot the histogram so qsort() works on stable data while
	 * the reader loop keeps updating sym_table[]. */
	memcpy(tmp, sym_table, sizeof(sym_table[0])*sym_table_count);
	qsort(tmp, sym_table_count, sizeof(tmp[0]), compare);

	for (i = 0; i < sym_table_count && tmp[i].count[0]; i++)
		sum_kevents += tmp[i].count[0];

	write(1, CONSOLE_CLEAR, strlen(CONSOLE_CLEAR));

	printf(
"------------------------------------------------------------------------------\n");
	printf( " KernelTop:%8.0f irqs/sec kernel:%4.1f%% [%s, ",
		events_per_sec,
		100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec)),
		nmi ? "NMI" : "IRQ");

	/* With a single counter also show its sampling period. */
	if (nr_counters == 1)
		printf("%d ", event_count[0]);

	for (counter = 0; counter < nr_counters; counter++) {
		if (counter)
			printf("/");

		printf("%s", event_name(counter));
	}

	printf( "], ");

	if (tid != -1)
		printf(" (tid: %d", tid);
	else
		printf(" (all");

	if (profile_cpu != -1)
		printf(", cpu: %d)\n", profile_cpu);
	else {
		if (tid != -1)
			printf(")\n");
		else
			printf(", %d CPUs)\n", nr_cpus);
	}

	printf("------------------------------------------------------------------------------\n\n");

	if (nr_counters == 1)
		printf(" events pcnt");
	else
		printf(" weight events pcnt");

	printf(" RIP kernel function\n"
	       " ______ ______ _____ ________________ _______________\n\n"
	);

	for (i = 0, printed = 0; i < sym_table_count; i++) {
		float pcnt;
		int count;

		/* Show at most ~19 rows, only symbols above the -f filter. */
		if (printed <= 18 && tmp[i].count[0] >= count_filter) {
			pcnt = 100.0 - (100.0*((sum_kevents-tmp[i].count[0])/sum_kevents));

			if (nr_counters == 1)
				printf("%19.2f - %4.1f%% - %016llx : %s\n",
					sym_weight(tmp + i),
					pcnt, tmp[i].addr, tmp[i].sym);
			else
				printf("%8.1f %10ld - %4.1f%% - %016llx : %s\n",
					sym_weight(tmp + i),
					tmp[i].count[0],
					pcnt, tmp[i].addr, tmp[i].sym);
			printed++;
		}
		/*
		 * Add decay to the counts:
		 */
		for (count = 0; count < nr_counters; count++)
			sym_table[i].count[count] = zero ? 0 : sym_table[i].count[count] * 7 / 8;
	}

	if (sym_filter_entry)
		show_details(sym_filter_entry);

	/* Any keypress on stdin terminates the tool. */
	{
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };

		if (poll(&stdin_poll, 1, 0) == 1) {
			printf("key pressed - exiting.\n");
			exit(0);
		}
	}
}
475
476 static void *display_thread(void *arg)
477 {
478 printf("KernelTop refresh period: %d seconds\n", delay_secs);
479
480 while (!sleep(delay_secs))
481 print_sym_table();
482
483 return NULL;
484 }
485
486 static int read_symbol(FILE *in, struct sym_entry *s)
487 {
488 static int filter_match = 0;
489 char *sym, stype;
490 char str[500];
491 int rc, pos;
492
493 rc = fscanf(in, "%llx %c %499s", &s->addr, &stype, str);
494 if (rc == EOF)
495 return -1;
496
497 assert(rc == 3);
498
499 /* skip until end of line: */
500 pos = strlen(str);
501 do {
502 rc = fgetc(in);
503 if (rc == '\n' || rc == EOF || pos >= 499)
504 break;
505 str[pos] = rc;
506 pos++;
507 } while (1);
508 str[pos] = 0;
509
510 sym = str;
511
512 /* Filter out known duplicates and non-text symbols. */
513 if (!strcmp(sym, "_text"))
514 return 1;
515 if (!min_ip && !strcmp(sym, "_stext"))
516 return 1;
517 if (!strcmp(sym, "_etext") || !strcmp(sym, "_sinittext"))
518 return 1;
519 if (stype != 'T' && stype != 't')
520 return 1;
521 if (!strncmp("init_module", sym, 11) || !strncmp("cleanup_module", sym, 14))
522 return 1;
523 if (strstr(sym, "_text_start") || strstr(sym, "_text_end"))
524 return 1;
525
526 s->sym = malloc(strlen(str));
527 assert(s->sym);
528
529 strcpy((char *)s->sym, str);
530 s->skip = 0;
531
532 /* Tag events to be skipped. */
533 if (!strcmp("default_idle", s->sym) || !strcmp("cpu_idle", s->sym))
534 s->skip = 1;
535 else if (!strcmp("enter_idle", s->sym) || !strcmp("exit_idle", s->sym))
536 s->skip = 1;
537 else if (!strcmp("mwait_idle", s->sym))
538 s->skip = 1;
539
540 if (filter_match == 1) {
541 filter_end = s->addr;
542 filter_match = -1;
543 if (filter_end - filter_start > 10000) {
544 printf("hm, too large filter symbol <%s> - skipping.\n",
545 sym_filter);
546 printf("symbol filter start: %016lx\n", filter_start);
547 printf(" end: %016lx\n", filter_end);
548 filter_end = filter_start = 0;
549 sym_filter = NULL;
550 sleep(1);
551 }
552 }
553 if (filter_match == 0 && sym_filter && !strcmp(s->sym, sym_filter)) {
554 filter_match = 1;
555 filter_start = s->addr;
556 }
557
558 return 0;
559 }
560
561 static int compare_addr(const void *__sym1, const void *__sym2)
562 {
563 const struct sym_entry *sym1 = __sym1, *sym2 = __sym2;
564
565 return sym1->addr > sym2->addr;
566 }
567
568 static void sort_symbol_table(void)
569 {
570 int i, dups;
571
572 do {
573 qsort(sym_table, sym_table_count, sizeof(sym_table[0]), compare_addr);
574 for (i = 0, dups = 0; i < sym_table_count; i++) {
575 if (sym_table[i].addr == sym_table[i+1].addr) {
576 sym_table[i+1].addr = -1ll;
577 dups++;
578 }
579 }
580 sym_table_count -= dups;
581 } while(dups);
582 }
583
/*
 * Load the kernel symbol table from /proc/kallsyms into sym_table[],
 * sort and de-duplicate it, set the min/max text range, and resolve
 * the optional --symbol annotation target.
 */
static void parse_symbols(void)
{
	struct sym_entry *last;

	FILE *kallsyms = fopen("/proc/kallsyms", "r");

	if (!kallsyms) {
		printf("Could not open /proc/kallsyms - no CONFIG_KALLSYMS_ALL=y?\n");
		exit(-1);
	}

	while (!feof(kallsyms)) {
		if (read_symbol(kallsyms, &sym_table[sym_table_count]) == 0) {
			sym_table_count++;
			assert(sym_table_count <= MAX_SYMS);
		}
	}

	sort_symbol_table();
	min_ip = sym_table[0].addr;
	max_ip = sym_table[sym_table_count-1].addr;
	/* Append a sentinel so binary searches always have an upper bound. */
	last = sym_table + sym_table_count++;

	last->addr = -1ll;
	last->sym = "<end>";

	/*
	 * read_symbol() recorded filter_start/filter_end when it saw the
	 * --symbol name; now that the table is sorted, bind the name to
	 * its entry for later annotation.
	 */
	if (filter_end) {
		int count;
		for (count=0; count < sym_table_count; count ++) {
			if (!strcmp(sym_table[count].sym, sym_filter)) {
				sym_filter_entry = &sym_table[count];
				break;
			}
		}
	}
	if (dump_symtab) {
		int i;

		for (i = 0; i < sym_table_count; i++)
			fprintf(stderr, "%llx %s\n",
				sym_table[i].addr, sym_table[i].sym);
	}
}
627
628 /*
629 * Source lines
630 */
631
632 static void parse_vmlinux(char *filename)
633 {
634 FILE *file;
635 char command[PATH_MAX*2];
636 if (!filename)
637 return;
638
639 sprintf(command, "objdump --start-address=0x%016lx --stop-address=0x%016lx -dS %s", filter_start, filter_end, filename);
640
641 file = popen(command, "r");
642 if (!file)
643 return;
644
645 lines_tail = &lines;
646 while (!feof(file)) {
647 struct source_line *src;
648 size_t dummy = 0;
649 char *c;
650
651 src = malloc(sizeof(struct source_line));
652 assert(src != NULL);
653 memset(src, 0, sizeof(struct source_line));
654
655 if (getline(&src->line, &dummy, file) < 0)
656 break;
657 if (!src->line)
658 break;
659
660 c = strchr(src->line, '\n');
661 if (c)
662 *c = 0;
663
664 src->next = NULL;
665 *lines_tail = src;
666 lines_tail = &src->next;
667
668 if (strlen(src->line)>8 && src->line[8] == ':')
669 src->EIP = strtoull(src->line, NULL, 16);
670 if (strlen(src->line)>8 && src->line[16] == ':')
671 src->EIP = strtoull(src->line, NULL, 16);
672 }
673 pclose(file);
674 }
675
676 static void record_precise_ip(uint64_t ip)
677 {
678 struct source_line *line;
679
680 for (line = lines; line; line = line->next) {
681 if (line->EIP == ip)
682 line->count++;
683 if (line->EIP > ip)
684 break;
685 }
686 }
687
688 static void lookup_sym_in_vmlinux(struct sym_entry *sym)
689 {
690 struct source_line *line;
691 char pattern[PATH_MAX];
692 sprintf(pattern, "<%s>:", sym->sym);
693
694 for (line = lines; line; line = line->next) {
695 if (strstr(line->line, pattern)) {
696 sym->source = line;
697 break;
698 }
699 }
700 }
701
702 static void show_lines(struct source_line *line_queue, int line_queue_count)
703 {
704 int i;
705 struct source_line *line;
706
707 line = line_queue;
708 for (i = 0; i < line_queue_count; i++) {
709 printf("%8li\t%s\n", line->count, line->line);
710 line = line->next;
711 }
712 }
713
/* Number of cold lines kept as leading context before a hot line. */
#define TRACE_COUNT 3

/*
 * Print annotated source/disassembly for the --symbol function: every
 * line whose hit count reaches count_filter is shown together with up
 * to TRACE_COUNT preceding context lines (a sliding queue).  Line
 * counts are reset as they are visited, so each refresh starts fresh.
 */
static void show_details(struct sym_entry *sym)
{
	struct source_line *line;
	struct source_line *line_queue = NULL;
	int displayed = 0;
	int line_queue_count = 0;

	/* Bind the symbol to the objdump output lazily, on first use. */
	if (!sym->source)
		lookup_sym_in_vmlinux(sym);
	if (!sym->source)
		return;

	printf("Showing details for %s\n", sym->sym);

	line = sym->source;
	while (line) {
		/* A later "<...>:" marker means we reached the next function. */
		if (displayed && strstr(line->line, ">:"))
			break;

		if (!line_queue_count)
			line_queue = line;
		line_queue_count ++;

		if (line->count >= count_filter) {
			/* Hot line: flush the queued context plus this line. */
			show_lines(line_queue, line_queue_count);
			line_queue_count = 0;
			line_queue = NULL;
		} else if (line_queue_count > TRACE_COUNT) {
			/* Keep the context window bounded: drop the oldest. */
			line_queue = line_queue->next;
			line_queue_count --;
		}

		line->count = 0;
		displayed++;
		/* Sanity cap on the amount of output per refresh. */
		if (displayed > 300)
			break;
		line = line->next;
	}
}
755
/*
 * Binary search in the histogram table and record the hit:
 */
static void record_ip(uint64_t ip, int counter)
{
	int left_idx, middle_idx, right_idx, idx;
	unsigned long left, middle, right;

	/* Also credit the exact source line for --symbol annotation. */
	record_precise_ip(ip);

	left_idx = 0;
	right_idx = sym_table_count-1;
	assert(ip <= max_ip && ip >= min_ip);

	while (left_idx + 1 < right_idx) {
		middle_idx = (left_idx + right_idx) / 2;

		left = sym_table[ left_idx].addr;
		middle = sym_table[middle_idx].addr;
		right = sym_table[ right_idx].addr;

		/*
		 * Diagnostic dumps before the assertions trip: the table
		 * must remain sorted and must bracket the sample ip.
		 */
		if (!(left <= middle && middle <= right)) {
			printf("%016lx...\n%016lx...\n%016lx\n", left, middle, right);
			printf("%d %d %d\n", left_idx, middle_idx, right_idx);
		}
		assert(left <= middle && middle <= right);
		if (!(left <= ip && ip <= right)) {
			printf(" left: %016lx\n", left);
			printf(" ip: %016lx\n", (unsigned long)ip);
			printf("right: %016lx\n", right);
		}
		assert(left <= ip && ip <= right);
		/*
		 * [ left .... target .... middle .... right ]
		 * => right := middle
		 */
		if (ip < middle) {
			right_idx = middle_idx;
			continue;
		}
		/*
		 * [ left .... middle ... target ... right ]
		 * => left := middle
		 */
		left_idx = middle_idx;
	}

	idx = left_idx;

	/*
	 * Idle-loop symbols are not charged; back out the global event
	 * count that process_event() already bumped for this sample.
	 */
	if (!sym_table[idx].skip)
		sym_table[idx].count[counter]++;
	else events--;
}
809
810 static void process_event(uint64_t ip, int counter)
811 {
812 events++;
813
814 if (ip < min_ip || ip > max_ip) {
815 userspace_events++;
816 return;
817 }
818
819 record_ip(ip, counter);
820 }
821
/*
 * Parse the command line into the file-scope option globals, then fill
 * in the default event set and sampling periods for anything the user
 * did not specify.  Any parse error prints usage and exits.
 */
static void process_options(int argc, char **argv)
{
	int error = 0, counter;

	for (;;) {
		int option_index = 0;
		/** Options for getopt */
		static struct option long_options[] = {
			{"count", required_argument, NULL, 'c'},
			{"cpu", required_argument, NULL, 'C'},
			{"delay", required_argument, NULL, 'd'},
			{"dump_symtab", no_argument, NULL, 'D'},
			{"event", required_argument, NULL, 'e'},
			{"filter", required_argument, NULL, 'f'},
			{"group", required_argument, NULL, 'g'},
			{"help", no_argument, NULL, 'h'},
			{"nmi", required_argument, NULL, 'n'},
			{"mmap_info", no_argument, NULL, 'M'},
			{"mmap_pages", required_argument, NULL, 'm'},
			{"munmap_info", no_argument, NULL, 'U'},
			{"pid", required_argument, NULL, 'p'},
			{"realtime", required_argument, NULL, 'r'},
			{"scale", no_argument, NULL, 'l'},
			{"symbol", required_argument, NULL, 's'},
			{"stat", no_argument, NULL, 'S'},
			{"vmlinux", required_argument, NULL, 'x'},
			{"zero", no_argument, NULL, 'z'},
			{NULL, 0, NULL, 0 }
		};
		/*
		 * NOTE(review): 'S' is accepted by getopt but has no case
		 * below, so -S falls into default and triggers usage -
		 * verify whether that is intended.
		 */
		int c = getopt_long(argc, argv, "+:ac:C:d:De:f:g:hln:m:p:r:s:Sx:zMU",
			long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'a': system_wide = 1; break;
		case 'c': default_interval = atoi(optarg); break;
		case 'C':
			/* CPU and PID are mutually exclusive */
			if (tid != -1) {
				printf("WARNING: CPU switch overriding PID\n");
				sleep(1);
				tid = -1;
			}
			profile_cpu = atoi(optarg); break;
		case 'd': delay_secs = atoi(optarg); break;
		case 'D': dump_symtab = 1; break;

		case 'e': error = parse_events(optarg); break;

		case 'f': count_filter = atoi(optarg); break;
		case 'g': group = atoi(optarg); break;
		case 'h': display_help(); break;
		case 'l': scale = 1; break;
		case 'n': nmi = atoi(optarg); break;
		case 'p':
			/* CPU and PID are mutually exclusive */
			if (profile_cpu != -1) {
				printf("WARNING: PID switch overriding CPU\n");
				sleep(1);
				profile_cpu = -1;
			}
			tid = atoi(optarg); break;
		case 'r': realtime_prio = atoi(optarg); break;
		case 's': sym_filter = strdup(optarg); break;
		case 'x': vmlinux = strdup(optarg); break;
		case 'z': zero = 1; break;
		case 'm': mmap_pages = atoi(optarg); break;
		case 'M': use_mmap = 1; break;
		case 'U': use_munmap = 1; break;
		default: error = 1; break;
		}
	}
	if (error)
		display_help();

	/* No -e given: fall back to a single default event (config 0). */
	if (!nr_counters) {
		nr_counters = 1;
		event_id[0] = 0;
	}

	/* Fill in the default period for events without an explicit -c. */
	for (counter = 0; counter < nr_counters; counter++) {
		if (event_count[counter])
			continue;

		event_count[counter] = default_interval;
	}
}
910
/* Per-counter state for one mmap()ed perf ring buffer. */
struct mmap_data {
	int counter;		/* index into event_id[]/event_count[] */
	void *base;		/* mmap base: control page + data pages */
	unsigned int mask;	/* data-area byte mask (mmap_pages*page_size - 1) */
	unsigned int prev;	/* last data_head position we consumed */
};
917
/*
 * Read the kernel's producer position from the control page.  The
 * rmb() after loading data_head orders that load before any reads of
 * the ring-buffer data it covers.
 */
static unsigned int mmap_read_head(struct mmap_data *md)
{
	struct perf_counter_mmap_page *pc = md->base;
	int head;

	head = pc->data_head;
	rmb();

	return head;
}
928
/* Timestamps of the current and previous drain, for the lag warning. */
struct timeval last_read, this_read;

/*
 * Drain one counter's ring buffer: feed overflow (sample) records to
 * process_event() and print mmap/munmap info records.  Records that
 * wrap around the end of the buffer are stitched into a bounce copy.
 */
static void mmap_read(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;	/* data follows the control page */
	int diff;

	gettimeofday(&this_read, NULL);

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and screw up the events under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff > md->mask / 2 || diff < 0) {
		struct timeval iv;
		unsigned long msecs;

		timersub(&this_read, &last_read, &iv);
		msecs = iv.tv_sec*1000 + iv.tv_usec/1000;

		fprintf(stderr, "WARNING: failed to keep up with mmap data."
				" Last read %lu msecs ago.\n", msecs);

		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	last_read = this_read;

	for (; old != head;) {
		/* On-the-wire layouts of the records the kernel emits: */
		struct ip_event {
			struct perf_event_header header;
			__u64 ip;
			__u32 pid, tid;
		};
		struct mmap_event {
			struct perf_event_header header;
			__u32 pid, tid;
			__u64 start;
			__u64 len;
			__u64 pgoff;
			char filename[PATH_MAX];
		};

		typedef union event_union {
			struct perf_event_header header;
			struct ip_event ip;
			struct mmap_event mmap;
		} event_t;

		event_t *event = (event_t *)&data[old & md->mask];

		event_t event_copy;

		size_t size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &event_copy;

			/* Copy the wrapped record piecewise into the bounce buffer. */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &event_copy;
		}

		old += size;

		if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
			/* A sample: route its instruction pointer to the histogram. */
			if (event->header.type & PERF_RECORD_IP)
				process_event(event->ip.ip, md->counter);
		} else {
			switch (event->header.type) {
			case PERF_EVENT_MMAP:
			case PERF_EVENT_MUNMAP:
				printf("%s: %Lu %Lu %Lu %s\n",
					event->header.type == PERF_EVENT_MMAP
					? "mmap" : "munmap",
					event->mmap.start,
					event->mmap.len,
					event->mmap.pgoff,
					event->mmap.filename);
				break;
			}
		}
	}

	md->prev = old;
}
1037
/*
 * Entry point for 'perf top': parse options, load symbols, open and
 * mmap one counter per (cpu, event), start the display thread, then
 * loop forever draining the ring buffers (never returns normally).
 */
int cmd_top(int argc, char **argv, const char *prefix)
{
	struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
	struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
	struct perf_counter_hw_event hw_event;
	pthread_t thread;
	int i, counter, group_fd, nr_poll = 0;
	unsigned int cpu;
	int ret;

	page_size = sysconf(_SC_PAGE_SIZE);

	process_options(argc, argv);

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	assert(nr_cpus <= MAX_NR_CPUS);
	assert(nr_cpus >= 0);

	/* Pinned to one task or one CPU: only a single row of fds is used. */
	if (tid != -1 || profile_cpu != -1)
		nr_cpus = 1;

	parse_symbols();
	if (vmlinux && sym_filter_entry)
		parse_vmlinux(vmlinux);

	for (i = 0; i < nr_cpus; i++) {
		group_fd = -1;
		for (counter = 0; counter < nr_counters; counter++) {

			cpu = profile_cpu;
			if (tid == -1 && profile_cpu == -1)
				cpu = i;

			memset(&hw_event, 0, sizeof(hw_event));
			hw_event.config = event_id[counter];
			hw_event.irq_period = event_count[counter];
			hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID;
			hw_event.nmi = nmi;
			hw_event.mmap = use_mmap;
			hw_event.munmap = use_munmap;

			fd[i][counter] = sys_perf_counter_open(&hw_event, tid, cpu, group_fd, 0);
			if (fd[i][counter] < 0) {
				int err = errno;
				printf("kerneltop error: syscall returned with %d (%s)\n",
					fd[i][counter], strerror(err));
				if (err == EPERM)
					printf("Are you root?\n");
				exit(-1);
			}
			assert(fd[i][counter] >= 0);
			fcntl(fd[i][counter], F_SETFL, O_NONBLOCK);

			/*
			 * First counter acts as the group leader:
			 */
			if (group && group_fd == -1)
				group_fd = fd[i][counter];

			event_array[nr_poll].fd = fd[i][counter];
			event_array[nr_poll].events = POLLIN;
			nr_poll++;

			mmap_array[i][counter].counter = counter;
			mmap_array[i][counter].prev = 0;
			mmap_array[i][counter].mask = mmap_pages*page_size - 1;
			/* +1 page: the first page is the control/meta-data page. */
			mmap_array[i][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
					PROT_READ, MAP_SHARED, fd[i][counter], 0);
			if (mmap_array[i][counter].base == MAP_FAILED) {
				printf("kerneltop error: failed to mmap with %d (%s)\n",
					errno, strerror(errno));
				exit(-1);
			}
		}
	}

	if (pthread_create(&thread, NULL, display_thread, NULL)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		int hits = events;

		for (i = 0; i < nr_cpus; i++) {
			for (counter = 0; counter < nr_counters; counter++)
				mmap_read(&mmap_array[i][counter]);
		}

		/*
		 * Only block in poll() when the last sweep produced no new
		 * events.  NOTE(review): ret is assigned but never read.
		 */
		if (hits == events)
			ret = poll(event_array, nr_poll, 100);
	}

	return 0;
}
This page took 0.053592 seconds and 6 git commands to generate.