tools/perf/util/parse-events.c
1 #include "../../../include/linux/hw_breakpoint.h"
2 #include "util.h"
3 #include "../perf.h"
4 #include "evlist.h"
5 #include "evsel.h"
6 #include "parse-options.h"
7 #include "parse-events.h"
8 #include "exec_cmd.h"
9 #include "string.h"
10 #include "symbol.h"
11 #include "cache.h"
12 #include "header.h"
13 #include "debugfs.h"
14 #include "parse-events-flex.h"
15 #include "pmu.h"
16
17 #define MAX_NAME_LEN 100
18
19 struct event_symbol {
20 u8 type;
21 u64 config;
22 const char *symbol;
23 const char *alias;
24 };
25
26 #ifdef PARSER_DEBUG
27 extern int parse_events_debug;
28 #endif
29 int parse_events_parse(struct list_head *list, int *idx);
30
31 #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
32 #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
33
34 static struct event_symbol event_symbols[] = {
35 { CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
36 { CHW(STALLED_CYCLES_FRONTEND), "stalled-cycles-frontend", "idle-cycles-frontend" },
37 { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" },
38 { CHW(INSTRUCTIONS), "instructions", "" },
39 { CHW(CACHE_REFERENCES), "cache-references", "" },
40 { CHW(CACHE_MISSES), "cache-misses", "" },
41 { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
42 { CHW(BRANCH_MISSES), "branch-misses", "" },
43 { CHW(BUS_CYCLES), "bus-cycles", "" },
44 { CHW(REF_CPU_CYCLES), "ref-cycles", "" },
45
46 { CSW(CPU_CLOCK), "cpu-clock", "" },
47 { CSW(TASK_CLOCK), "task-clock", "" },
48 { CSW(PAGE_FAULTS), "page-faults", "faults" },
49 { CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
50 { CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
51 { CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
52 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
53 { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" },
54 { CSW(EMULATION_FAULTS), "emulation-faults", "" },
55 };
56
57 #define __PERF_EVENT_FIELD(config, name) \
58 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
59
60 #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
61 #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
62 #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
63 #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
64
65 static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
66 "cpu-clock",
67 "task-clock",
68 "page-faults",
69 "context-switches",
70 "CPU-migrations",
71 "minor-faults",
72 "major-faults",
73 "alignment-faults",
74 "emulation-faults",
75 };
76
77 #define MAX_ALIASES 8
78
79 static const char *hw_cache[PERF_COUNT_HW_CACHE_MAX][MAX_ALIASES] = {
80 { "L1-dcache", "l1-d", "l1d", "L1-data", },
81 { "L1-icache", "l1-i", "l1i", "L1-instruction", },
82 { "LLC", "L2", },
83 { "dTLB", "d-tlb", "Data-TLB", },
84 { "iTLB", "i-tlb", "Instruction-TLB", },
85 { "branch", "branches", "bpu", "btb", "bpc", },
86 { "node", },
87 };
88
89 static const char *hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][MAX_ALIASES] = {
90 { "load", "loads", "read", },
91 { "store", "stores", "write", },
92 { "prefetch", "prefetches", "speculative-read", "speculative-load", },
93 };
94
95 static const char *hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
96 [MAX_ALIASES] = {
97 { "refs", "Reference", "ops", "access", },
98 { "misses", "miss", },
99 };
100
101 #define C(x) PERF_COUNT_HW_CACHE_##x
102 #define CACHE_READ (1 << C(OP_READ))
103 #define CACHE_WRITE (1 << C(OP_WRITE))
104 #define CACHE_PREFETCH (1 << C(OP_PREFETCH))
105 #define COP(x) (1 << x)
106
107 /*
108  * cache operation stat
109 * L1I : Read and prefetch only
110 * ITLB and BPU : Read-only
111 */
112 static unsigned long hw_cache_stat[C(MAX)] = {
113 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
114 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
115 [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
116 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
117 [C(ITLB)] = (CACHE_READ),
118 [C(BPU)] = (CACHE_READ),
119 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
120 };
121
122 #define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
123 while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
124 if (sys_dirent.d_type == DT_DIR && \
125 (strcmp(sys_dirent.d_name, ".")) && \
126 (strcmp(sys_dirent.d_name, "..")))
127
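/* Check that a tracepoint directory exposes a readable 'id' file. */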
128 static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
129 {
130 char evt_path[MAXPATHLEN];
131 int fd;
132
133 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
134 sys_dir->d_name, evt_dir->d_name);
135 fd = open(evt_path, O_RDONLY);
136 if (fd < 0)
137 return -EINVAL;
138 close(fd);
139
140 return 0;
141 }
142
143 #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \
144 while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \
145 if (evt_dirent.d_type == DT_DIR && \
146 (strcmp(evt_dirent.d_name, ".")) && \
147 (strcmp(evt_dirent.d_name, "..")) && \
148 (!tp_event_has_id(&sys_dirent, &evt_dirent)))
149
150 #define MAX_EVENT_LENGTH 512
151
152
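/*
 * Map a tracepoint config value (its numeric id) back to a
 * "system:name" pair by scanning the <sys>/<event>/id files under
 * tracing_events_path.  Returns a newly allocated tracepoint_path,
 * or NULL if the id is not found.
 */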
153 struct tracepoint_path *tracepoint_id_to_path(u64 config)
154 {
155 struct tracepoint_path *path = NULL;
156 DIR *sys_dir, *evt_dir;
157 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
158 char id_buf[24];
159 int fd;
160 u64 id;
161 char evt_path[MAXPATHLEN];
162 char dir_path[MAXPATHLEN];
163
164 if (debugfs_valid_mountpoint(tracing_events_path))
165 return NULL;
166
167 sys_dir = opendir(tracing_events_path);
168 if (!sys_dir)
169 return NULL;
170
171 for_each_subsystem(sys_dir, sys_dirent, sys_next) {
172
173 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
174 sys_dirent.d_name);
175 evt_dir = opendir(dir_path);
176 if (!evt_dir)
177 continue;
178
179 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
180
181 snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
182 evt_dirent.d_name);
183 fd = open(evt_path, O_RDONLY);
184 if (fd < 0)
185 continue;
186 if (read(fd, id_buf, sizeof(id_buf)) < 0) {
187 close(fd);
188 continue;
189 }
190 close(fd);
191 id = atoll(id_buf);
192 if (id == config) {
193 closedir(evt_dir);
194 closedir(sys_dir);
195 path = zalloc(sizeof(*path));
196 path->system = malloc(MAX_EVENT_LENGTH);
197 if (!path->system) {
198 free(path);
199 return NULL;
200 }
201 path->name = malloc(MAX_EVENT_LENGTH);
202 if (!path->name) {
203 free(path->system);
204 free(path);
205 return NULL;
206 }
207 strncpy(path->system, sys_dirent.d_name,
208 MAX_EVENT_LENGTH);
209 strncpy(path->name, evt_dirent.d_name,
210 MAX_EVENT_LENGTH);
211 return path;
212 }
213 }
214 closedir(evt_dir);
215 }
216
217 closedir(sys_dir);
218 return NULL;
219 }
220
221 #define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
222 static const char *tracepoint_id_to_name(u64 config)
223 {
224 static char buf[TP_PATH_LEN];
225 struct tracepoint_path *path;
226
227 path = tracepoint_id_to_path(config);
228 if (path) {
229 snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
230 free(path->name);
231 free(path->system);
232 free(path);
233 } else
234 snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
235
236 return buf;
237 }
238
239 static int is_cache_op_valid(u8 cache_type, u8 cache_op)
240 {
241 if (hw_cache_stat[cache_type] & COP(cache_op))
242 return 1; /* valid */
243 else
244 return 0; /* invalid */
245 }
246
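/*
 * Render a "<type>-<op>[-<result>]" event name into a static buffer;
 * not reentrant, so the result must be consumed before the next call.
 */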
247 static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
248 {
249 static char name[50];
250
251 if (cache_result) {
252 sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
253 hw_cache_op[cache_op][0],
254 hw_cache_result[cache_result][0]);
255 } else {
256 sprintf(name, "%s-%s", hw_cache[cache_type][0],
257 hw_cache_op[cache_op][1]);
258 }
259
260 return name;
261 }
262
263 const char *event_type(int type)
264 {
265 switch (type) {
266 case PERF_TYPE_HARDWARE:
267 return "hardware";
268
269 case PERF_TYPE_SOFTWARE:
270 return "software";
271
272 case PERF_TYPE_TRACEPOINT:
273 return "tracepoint";
274
275 case PERF_TYPE_HW_CACHE:
276 return "hardware-cache";
277
278 default:
279 break;
280 }
281
282 return "unknown";
283 }
284
285 const char *event_name(struct perf_evsel *evsel)
286 {
287 u64 config = evsel->attr.config;
288 int type = evsel->attr.type;
289
290 if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) {
291 /*
292  * XXX minimal fix, see comment on perf_evsel__name, this static buffer
293 * will go away together with event_name in the next devel cycle.
294 */
295 static char bf[128];
296 perf_evsel__name(evsel, bf, sizeof(bf));
297 return bf;
298 }
299
300 if (evsel->name)
301 return evsel->name;
302
303 return __event_name(type, config);
304 }
305
306 const char *__event_name(int type, u64 config)
307 {
308 static char buf[32];
309
310 if (type == PERF_TYPE_RAW) {
311 sprintf(buf, "raw 0x%" PRIx64, config);
312 return buf;
313 }
314
315 switch (type) {
316 case PERF_TYPE_HARDWARE:
317 return __perf_evsel__hw_name(config);
318
319 case PERF_TYPE_HW_CACHE: {
320 u8 cache_type, cache_op, cache_result;
321
322 cache_type = (config >> 0) & 0xff;
323 		if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
324 return "unknown-ext-hardware-cache-type";
325
326 cache_op = (config >> 8) & 0xff;
327 		if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
328 return "unknown-ext-hardware-cache-op";
329
330 cache_result = (config >> 16) & 0xff;
331 		if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
332 return "unknown-ext-hardware-cache-result";
333
334 if (!is_cache_op_valid(cache_type, cache_op))
335 return "invalid-cache";
336
337 return event_cache_name(cache_type, cache_op, cache_result);
338 }
339
340 case PERF_TYPE_SOFTWARE:
341 if (config < PERF_COUNT_SW_MAX && sw_event_names[config])
342 return sw_event_names[config];
343 return "unknown-software";
344
345 case PERF_TYPE_TRACEPOINT:
346 return tracepoint_id_to_name(config);
347
348 default:
349 break;
350 }
351
352 return "unknown";
353 }
354
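/*
 * Allocate the event list on first use, wrap the attr in a new
 * perf_evsel named after 'name' and append it to the list.
 */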
355 static int add_event(struct list_head **_list, int *idx,
356 struct perf_event_attr *attr, char *name)
357 {
358 struct perf_evsel *evsel;
359 struct list_head *list = *_list;
360
361 if (!list) {
362 list = malloc(sizeof(*list));
363 if (!list)
364 return -ENOMEM;
365 INIT_LIST_HEAD(list);
366 }
367
368 event_attr_init(attr);
369
370 evsel = perf_evsel__new(attr, (*idx)++);
371 if (!evsel) {
372 free(list);
373 return -ENOMEM;
374 }
375
376 evsel->name = strdup(name);
377 list_add_tail(&evsel->node, list);
378 *_list = list;
379 return 0;
380 }
381
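/*
 * Return the index of the first alias row that contains a
 * case-insensitive prefix of 'str', or -1 if nothing matches.
 */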
382 static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size)
383 {
384 int i, j;
385 int n, longest = -1;
386
387 for (i = 0; i < size; i++) {
388 for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
389 n = strlen(names[i][j]);
390 if (n > longest && !strncasecmp(str, names[i][j], n))
391 longest = n;
392 }
393 if (longest > 0)
394 return i;
395 }
396
397 return -1;
398 }
399
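/*
 * Build a PERF_TYPE_HW_CACHE event from "<type>-<op>-<result>"
 * notation, e.g. "L1-dcache-load-misses".  The op and result parts
 * are optional and default to 'read' and 'access' respectively.
 */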
400 int parse_events_add_cache(struct list_head **list, int *idx,
401 char *type, char *op_result1, char *op_result2)
402 {
403 struct perf_event_attr attr;
404 char name[MAX_NAME_LEN];
405 int cache_type = -1, cache_op = -1, cache_result = -1;
406 char *op_result[2] = { op_result1, op_result2 };
407 int i, n;
408
409 /*
410 * No fallback - if we cannot get a clear cache type
411 * then bail out:
412 */
413 cache_type = parse_aliases(type, hw_cache,
414 PERF_COUNT_HW_CACHE_MAX);
415 if (cache_type == -1)
416 return -EINVAL;
417
418 n = snprintf(name, MAX_NAME_LEN, "%s", type);
419
420 for (i = 0; (i < 2) && (op_result[i]); i++) {
421 char *str = op_result[i];
422
423 		snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
424
425 if (cache_op == -1) {
426 cache_op = parse_aliases(str, hw_cache_op,
427 PERF_COUNT_HW_CACHE_OP_MAX);
428 if (cache_op >= 0) {
429 if (!is_cache_op_valid(cache_type, cache_op))
430 return -EINVAL;
431 continue;
432 }
433 }
434
435 if (cache_result == -1) {
436 cache_result = parse_aliases(str, hw_cache_result,
437 PERF_COUNT_HW_CACHE_RESULT_MAX);
438 if (cache_result >= 0)
439 continue;
440 }
441 }
442
443 /*
444 * Fall back to reads:
445 */
446 if (cache_op == -1)
447 cache_op = PERF_COUNT_HW_CACHE_OP_READ;
448
449 /*
450 * Fall back to accesses:
451 */
452 if (cache_result == -1)
453 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
454
455 memset(&attr, 0, sizeof(attr));
456 attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
457 attr.type = PERF_TYPE_HW_CACHE;
458 return add_event(list, idx, &attr, name);
459 }
460
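/*
 * Create a single tracepoint event: read its numeric id from
 * <tracing_events_path>/<sys>/<event>/id and request raw, time and
 * cpu samples with a sample period of 1.
 */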
461 static int add_tracepoint(struct list_head **list, int *idx,
462 char *sys_name, char *evt_name)
463 {
464 struct perf_event_attr attr;
465 char name[MAX_NAME_LEN];
466 char evt_path[MAXPATHLEN];
467 	char id_buf[24];
468 u64 id;
469 int fd;
470
471 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
472 sys_name, evt_name);
473
474 fd = open(evt_path, O_RDONLY);
475 if (fd < 0)
476 return -1;
477
478 if (read(fd, id_buf, sizeof(id_buf)) < 0) {
479 close(fd);
480 return -1;
481 }
482
483 close(fd);
484 id = atoll(id_buf);
485
486 memset(&attr, 0, sizeof(attr));
487 attr.config = id;
488 attr.type = PERF_TYPE_TRACEPOINT;
489 attr.sample_type |= PERF_SAMPLE_RAW;
490 attr.sample_type |= PERF_SAMPLE_TIME;
491 attr.sample_type |= PERF_SAMPLE_CPU;
492 attr.sample_period = 1;
493
494 snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name);
495 return add_event(list, idx, &attr, name);
496 }
497
498 static int add_tracepoint_multi(struct list_head **list, int *idx,
499 char *sys_name, char *evt_name)
500 {
501 char evt_path[MAXPATHLEN];
502 struct dirent *evt_ent;
503 DIR *evt_dir;
504 int ret = 0;
505
506 snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
507 evt_dir = opendir(evt_path);
508 if (!evt_dir) {
509 perror("Can't open event dir");
510 return -1;
511 }
512
513 while (!ret && (evt_ent = readdir(evt_dir))) {
514 if (!strcmp(evt_ent->d_name, ".")
515 || !strcmp(evt_ent->d_name, "..")
516 || !strcmp(evt_ent->d_name, "enable")
517 || !strcmp(evt_ent->d_name, "filter"))
518 continue;
519
520 if (!strglobmatch(evt_ent->d_name, evt_name))
521 continue;
522
523 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
524 }
525
526 return ret;
527 }
528
529 int parse_events_add_tracepoint(struct list_head **list, int *idx,
530 char *sys, char *event)
531 {
532 int ret;
533
534 ret = debugfs_valid_mountpoint(tracing_events_path);
535 if (ret)
536 return ret;
537
538 return strpbrk(event, "*?") ?
539 add_tracepoint_multi(list, idx, sys, event) :
540 add_tracepoint(list, idx, sys, event);
541 }
542
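/*
 * Parse up to three 'r', 'w', 'x' characters into bp_type bits.
 * An empty or missing type string defaults to read|write.
 */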
543 static int
544 parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
545 {
546 int i;
547
548 for (i = 0; i < 3; i++) {
549 if (!type || !type[i])
550 break;
551
552 switch (type[i]) {
553 case 'r':
554 attr->bp_type |= HW_BREAKPOINT_R;
555 break;
556 case 'w':
557 attr->bp_type |= HW_BREAKPOINT_W;
558 break;
559 case 'x':
560 attr->bp_type |= HW_BREAKPOINT_X;
561 break;
562 default:
563 return -EINVAL;
564 }
565 }
566
567 if (!attr->bp_type) /* Default */
568 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
569
570 return 0;
571 }
572
573 int parse_events_add_breakpoint(struct list_head **list, int *idx,
574 void *ptr, char *type)
575 {
576 struct perf_event_attr attr;
577 char name[MAX_NAME_LEN];
578
579 memset(&attr, 0, sizeof(attr));
580 attr.bp_addr = (unsigned long) ptr;
581
582 if (parse_breakpoint_type(type, &attr))
583 return -EINVAL;
584
585 /*
586 	 * We should find a nice way to override the access length.
587 	 * Provide some defaults for now.
588 */
589 if (attr.bp_type == HW_BREAKPOINT_X)
590 attr.bp_len = sizeof(long);
591 else
592 attr.bp_len = HW_BREAKPOINT_LEN_4;
593
594 attr.type = PERF_TYPE_BREAKPOINT;
595
596 snprintf(name, MAX_NAME_LEN, "mem:%p:%s", ptr, type ? type : "rw");
597 return add_event(list, idx, &attr, name);
598 }
599
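/*
 * Apply one hardcoded term (config, config1, config2, period, ...) to
 * the event attr, rejecting terms whose value type does not match.
 */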
600 static int config_term(struct perf_event_attr *attr,
601 struct parse_events__term *term)
602 {
603 #define CHECK_TYPE_VAL(type) \
604 do { \
605 if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val) \
606 return -EINVAL; \
607 } while (0)
608
609 switch (term->type_term) {
610 case PARSE_EVENTS__TERM_TYPE_CONFIG:
611 CHECK_TYPE_VAL(NUM);
612 attr->config = term->val.num;
613 break;
614 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
615 CHECK_TYPE_VAL(NUM);
616 attr->config1 = term->val.num;
617 break;
618 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
619 CHECK_TYPE_VAL(NUM);
620 attr->config2 = term->val.num;
621 break;
622 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
623 CHECK_TYPE_VAL(NUM);
624 attr->sample_period = term->val.num;
625 break;
626 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
627 /*
628 * TODO uncomment when the field is available
629 * attr->branch_sample_type = term->val.num;
630 */
631 break;
632 case PARSE_EVENTS__TERM_TYPE_NAME:
633 CHECK_TYPE_VAL(STR);
634 break;
635 default:
636 return -EINVAL;
637 }
638
639 return 0;
640 #undef CHECK_TYPE_VAL
641 }
642
643 static int config_attr(struct perf_event_attr *attr,
644 struct list_head *head, int fail)
645 {
646 struct parse_events__term *term;
647
648 list_for_each_entry(term, head, list)
649 if (config_term(attr, term) && fail)
650 return -EINVAL;
651
652 return 0;
653 }
654
655 int parse_events_add_numeric(struct list_head **list, int *idx,
656 unsigned long type, unsigned long config,
657 struct list_head *head_config)
658 {
659 struct perf_event_attr attr;
660
661 memset(&attr, 0, sizeof(attr));
662 attr.type = type;
663 attr.config = config;
664
665 if (head_config &&
666 config_attr(&attr, head_config, 1))
667 return -EINVAL;
668
669 return add_event(list, idx, &attr,
670 (char *) __event_name(type, config));
671 }
672
673 static int parse_events__is_name_term(struct parse_events__term *term)
674 {
675 return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
676 }
677
678 static char *pmu_event_name(struct perf_event_attr *attr,
679 struct list_head *head_terms)
680 {
681 struct parse_events__term *term;
682
683 list_for_each_entry(term, head_terms, list)
684 if (parse_events__is_name_term(term))
685 return term->val.str;
686
687 return (char *) __event_name(PERF_TYPE_RAW, attr->config);
688 }
689
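/*
 * Add an event for a named PMU: hardcoded terms are applied to the
 * attr first, then perf_pmu__config() translates the remaining terms
 * using the PMU's format description.
 */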
690 int parse_events_add_pmu(struct list_head **list, int *idx,
691 char *name, struct list_head *head_config)
692 {
693 struct perf_event_attr attr;
694 struct perf_pmu *pmu;
695
696 pmu = perf_pmu__find(name);
697 if (!pmu)
698 return -EINVAL;
699
700 memset(&attr, 0, sizeof(attr));
701
702 /*
703 * Configure hardcoded terms first, no need to check
704 * return value when called with fail == 0 ;)
705 */
706 config_attr(&attr, head_config, 0);
707
708 if (perf_pmu__config(pmu, &attr, head_config))
709 return -EINVAL;
710
711 return add_event(list, idx, &attr,
712 pmu_event_name(&attr, head_config));
713 }
714
715 void parse_events_update_lists(struct list_head *list_event,
716 struct list_head *list_all)
717 {
718 /*
719 	 * Called for a single event definition. Update the
720 	 * 'all events' list and reinit the 'single event'
721 	 * list for the next event definition.
722 */
723 list_splice_tail(list_event, list_all);
724 free(list_event);
725 }
726
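/*
 * Apply event modifiers to every evsel on the list:
 *   u/k/h - count at user, kernel or hypervisor level only
 *   G/H   - count in guest or host mode only
 *   p     - increase precise_ip, may be given up to three times
 * e.g. the modifier string "kpp" restricts counting to the kernel
 * with precise_ip = 2.
 */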
727 int parse_events_modifier(struct list_head *list, char *str)
728 {
729 struct perf_evsel *evsel;
730 int exclude = 0, exclude_GH = 0;
731 int eu = 0, ek = 0, eh = 0, eH = 0, eG = 0, precise = 0;
732
733 if (str == NULL)
734 return 0;
735
736 while (*str) {
737 if (*str == 'u') {
738 if (!exclude)
739 exclude = eu = ek = eh = 1;
740 eu = 0;
741 } else if (*str == 'k') {
742 if (!exclude)
743 exclude = eu = ek = eh = 1;
744 ek = 0;
745 } else if (*str == 'h') {
746 if (!exclude)
747 exclude = eu = ek = eh = 1;
748 eh = 0;
749 } else if (*str == 'G') {
750 if (!exclude_GH)
751 exclude_GH = eG = eH = 1;
752 eG = 0;
753 } else if (*str == 'H') {
754 if (!exclude_GH)
755 exclude_GH = eG = eH = 1;
756 eH = 0;
757 } else if (*str == 'p') {
758 precise++;
759 } else
760 break;
761
762 ++str;
763 }
764
765 /*
766 * precise ip:
767 *
768 * 0 - SAMPLE_IP can have arbitrary skid
769 * 1 - SAMPLE_IP must have constant skid
770 * 2 - SAMPLE_IP requested to have 0 skid
771 * 3 - SAMPLE_IP must have 0 skid
772 *
773 * See also PERF_RECORD_MISC_EXACT_IP
774 */
775 if (precise > 3)
776 return -EINVAL;
777
778 list_for_each_entry(evsel, list, node) {
779 evsel->attr.exclude_user = eu;
780 evsel->attr.exclude_kernel = ek;
781 evsel->attr.exclude_hv = eh;
782 evsel->attr.precise_ip = precise;
783 evsel->attr.exclude_host = eH;
784 evsel->attr.exclude_guest = eG;
785 }
786
787 return 0;
788 }
789
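/*
 * Top-level entry point: run the flex/bison parser over 'str' and,
 * on success, splice the resulting evsels onto the evlist.
 */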
790 int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
791 {
792 LIST_HEAD(list);
793 LIST_HEAD(list_tmp);
794 YY_BUFFER_STATE buffer;
795 int ret, idx = evlist->nr_entries;
796
797 buffer = parse_events__scan_string(str);
798
799 #ifdef PARSER_DEBUG
800 parse_events_debug = 1;
801 #endif
802 ret = parse_events_parse(&list, &idx);
803
804 parse_events__flush_buffer(buffer);
805 parse_events__delete_buffer(buffer);
806 parse_events_lex_destroy();
807
808 if (!ret) {
809 int entries = idx - evlist->nr_entries;
810 perf_evlist__splice_list_tail(evlist, &list, entries);
811 return 0;
812 }
813
814 /*
815 * There are 2 users - builtin-record and builtin-test objects.
816 	 * Both call perf_evlist__delete in case of error, so we don't
817 * need to bother.
818 */
819 fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
820 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
821 return ret;
822 }
823
824 int parse_events_option(const struct option *opt, const char *str,
825 int unset __used)
826 {
827 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
828 return parse_events(evlist, str, unset);
829 }
830
831 int parse_filter(const struct option *opt, const char *str,
832 int unset __used)
833 {
834 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
835 struct perf_evsel *last = NULL;
836
837 if (evlist->nr_entries > 0)
838 last = list_entry(evlist->entries.prev, struct perf_evsel, node);
839
840 if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
841 fprintf(stderr,
842 "-F option should follow a -e tracepoint option\n");
843 return -1;
844 }
845
846 last->filter = strdup(str);
847 if (last->filter == NULL) {
848 fprintf(stderr, "not enough memory to hold filter string\n");
849 return -1;
850 }
851
852 return 0;
853 }
854
855 static const char * const event_type_descriptors[] = {
856 "Hardware event",
857 "Software event",
858 "Tracepoint event",
859 "Hardware cache event",
860 "Raw hardware event descriptor",
861 "Hardware breakpoint",
862 };
863
864 /*
865 * Print the events from <debugfs_mount_point>/tracing/events
866 */
867
868 void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
869 {
870 DIR *sys_dir, *evt_dir;
871 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
872 char evt_path[MAXPATHLEN];
873 char dir_path[MAXPATHLEN];
874
875 if (debugfs_valid_mountpoint(tracing_events_path))
876 return;
877
878 sys_dir = opendir(tracing_events_path);
879 if (!sys_dir)
880 return;
881
882 for_each_subsystem(sys_dir, sys_dirent, sys_next) {
883 if (subsys_glob != NULL &&
884 !strglobmatch(sys_dirent.d_name, subsys_glob))
885 continue;
886
887 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
888 sys_dirent.d_name);
889 evt_dir = opendir(dir_path);
890 if (!evt_dir)
891 continue;
892
893 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
894 if (event_glob != NULL &&
895 !strglobmatch(evt_dirent.d_name, event_glob))
896 continue;
897
898 snprintf(evt_path, MAXPATHLEN, "%s:%s",
899 sys_dirent.d_name, evt_dirent.d_name);
900 printf(" %-50s [%s]\n", evt_path,
901 event_type_descriptors[PERF_TYPE_TRACEPOINT]);
902 }
903 closedir(evt_dir);
904 }
905 closedir(sys_dir);
906 }
907
908 /*
909 * Check whether event is in <debugfs_mount_point>/tracing/events
910 */
911
912 int is_valid_tracepoint(const char *event_string)
913 {
914 DIR *sys_dir, *evt_dir;
915 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
916 char evt_path[MAXPATHLEN];
917 char dir_path[MAXPATHLEN];
918
919 if (debugfs_valid_mountpoint(tracing_events_path))
920 return 0;
921
922 sys_dir = opendir(tracing_events_path);
923 if (!sys_dir)
924 return 0;
925
926 for_each_subsystem(sys_dir, sys_dirent, sys_next) {
927
928 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
929 sys_dirent.d_name);
930 evt_dir = opendir(dir_path);
931 if (!evt_dir)
932 continue;
933
934 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
935 snprintf(evt_path, MAXPATHLEN, "%s:%s",
936 sys_dirent.d_name, evt_dirent.d_name);
937 if (!strcmp(evt_path, event_string)) {
938 closedir(evt_dir);
939 closedir(sys_dir);
940 return 1;
941 }
942 }
943 closedir(evt_dir);
944 }
945 closedir(sys_dir);
946 return 0;
947 }
948
949 void print_events_type(u8 type)
950 {
951 struct event_symbol *syms = event_symbols;
952 unsigned int i;
953 char name[64];
954
955 for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
956 if (type != syms->type)
957 continue;
958
959 if (strlen(syms->alias))
960 snprintf(name, sizeof(name), "%s OR %s",
961 syms->symbol, syms->alias);
962 else
963 snprintf(name, sizeof(name), "%s", syms->symbol);
964
965 printf(" %-50s [%s]\n", name,
966 event_type_descriptors[type]);
967 }
968 }
969
970 int print_hwcache_events(const char *event_glob)
971 {
972 unsigned int type, op, i, printed = 0;
973
974 for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
975 for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
976 /* skip invalid cache type */
977 if (!is_cache_op_valid(type, op))
978 continue;
979
980 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
981 char *name = event_cache_name(type, op, i);
982
983 if (event_glob != NULL && !strglobmatch(name, event_glob))
984 continue;
985
986 printf(" %-50s [%s]\n", name,
987 event_type_descriptors[PERF_TYPE_HW_CACHE]);
988 ++printed;
989 }
990 }
991 }
992
993 return printed;
994 }
995
996 /*
997 * Print the help text for the event symbols:
998 */
999 void print_events(const char *event_glob)
1000 {
1001 unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0;
1002 struct event_symbol *syms = event_symbols;
1003 char name[MAX_NAME_LEN];
1004
1005 printf("\n");
1006 printf("List of pre-defined events (to be used in -e):\n");
1007
1008 for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
1009 type = syms->type;
1010
1011 if (type != prev_type && printed) {
1012 printf("\n");
1013 printed = 0;
1014 ntypes_printed++;
1015 }
1016
1017 if (event_glob != NULL &&
1018 !(strglobmatch(syms->symbol, event_glob) ||
1019 (syms->alias && strglobmatch(syms->alias, event_glob))))
1020 continue;
1021
1022 if (strlen(syms->alias))
1023 snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
1024 else
1025 strncpy(name, syms->symbol, MAX_NAME_LEN);
1026 printf(" %-50s [%s]\n", name,
1027 event_type_descriptors[type]);
1028
1029 prev_type = type;
1030 ++printed;
1031 }
1032
1033 if (ntypes_printed) {
1034 printed = 0;
1035 printf("\n");
1036 }
1037 print_hwcache_events(event_glob);
1038
1039 if (event_glob != NULL)
1040 return;
1041
1042 printf("\n");
1043 printf(" %-50s [%s]\n",
1044 "rNNN",
1045 event_type_descriptors[PERF_TYPE_RAW]);
1046 printf(" %-50s [%s]\n",
1047 "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
1048 event_type_descriptors[PERF_TYPE_RAW]);
1049 printf(" (see 'perf list --help' on how to encode it)\n");
1050 printf("\n");
1051
1052 printf(" %-50s [%s]\n",
1053 "mem:<addr>[:access]",
1054 event_type_descriptors[PERF_TYPE_BREAKPOINT]);
1055 printf("\n");
1056
1057 print_tracepoint_events(NULL, NULL);
1058 }
1059
1060 int parse_events__is_hardcoded_term(struct parse_events__term *term)
1061 {
1062 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
1063 }
1064
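/*
 * Allocate a term of either numeric or string type; the config/str
 * pointers are taken over as-is, not copied.
 */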
1065 static int new_term(struct parse_events__term **_term, int type_val,
1066 int type_term, char *config,
1067 char *str, long num)
1068 {
1069 struct parse_events__term *term;
1070
1071 term = zalloc(sizeof(*term));
1072 if (!term)
1073 return -ENOMEM;
1074
1075 INIT_LIST_HEAD(&term->list);
1076 term->type_val = type_val;
1077 term->type_term = type_term;
1078 term->config = config;
1079
1080 switch (type_val) {
1081 case PARSE_EVENTS__TERM_TYPE_NUM:
1082 term->val.num = num;
1083 break;
1084 case PARSE_EVENTS__TERM_TYPE_STR:
1085 term->val.str = str;
1086 break;
1087 default:
1088 return -EINVAL;
1089 }
1090
1091 *_term = term;
1092 return 0;
1093 }
1094
1095 int parse_events__term_num(struct parse_events__term **term,
1096 int type_term, char *config, long num)
1097 {
1098 return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
1099 config, NULL, num);
1100 }
1101
1102 int parse_events__term_str(struct parse_events__term **term,
1103 int type_term, char *config, char *str)
1104 {
1105 return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
1106 config, str, 0);
1107 }
1108
1109 void parse_events__free_terms(struct list_head *terms)
1110 {
1111 struct parse_events__term *term, *h;
1112
1113 list_for_each_entry_safe(term, h, terms, list)
1114 free(term);
1115
1116 free(terms);
1117 }