/*
 * Performance counter user-space ABI definitions and syscall wrapper.
 */
/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IO('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO('$', 1)

/*
 * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_COUNTERS_DISABLE	31
#define PR_TASK_PERF_COUNTERS_ENABLE	32

/*
 * Number of elements in a statically-sized array.  Only valid on true
 * arrays: a pointer (including an array function parameter, which
 * decays to a pointer) silently yields a wrong answer.
 */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

f49012fa WF |
16 | #define rdclock() \ |
17 | ({ \ | |
18 | struct timespec ts; \ | |
19 | \ | |
20 | clock_gettime(CLOCK_MONOTONIC, &ts); \ | |
21 | ts.tv_sec * 1000000000ULL + ts.tv_nsec; \ | |
22 | }) | |
23 | ||
/*
 * Pick up some kernel type conventions:
 * (__user and asmlinkage are kernel annotations with no user-space
 * meaning, so they expand to nothing here.)
 */
#define __user
#define asmlinkage

typedef unsigned int		__u32;
typedef unsigned long long	__u64;
typedef long long		__s64;

/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		= 0,
	PERF_COUNT_INSTRUCTIONS		= 1,
	PERF_COUNT_CACHE_REFERENCES	= 2,
	PERF_COUNT_CACHE_MISSES		= 3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_BRANCH_MISSES	= 5,
	PERF_COUNT_BUS_CYCLES		= 6,

	PERF_HW_EVENTS_MAX		= 7,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters.  These
	 * counters measure various physical and sw events of the
	 * kernel (and allow the profiling of them as well).  They use
	 * negative type values so they cannot collide with the
	 * hardware events above:
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	PERF_COUNT_PAGE_FAULTS		= -3,
	PERF_COUNT_CONTEXT_SWITCHES	= -4,
	PERF_COUNT_CPU_MIGRATIONS	= -5,

	PERF_SW_EVENTS_MIN		= -6,
};

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE	= 0,
	PERF_RECORD_IRQ		= 1,
	PERF_RECORD_GROUP	= 2,
};

80 | /* | |
81 | * Hardware event to monitor via a performance monitoring counter: | |
82 | */ | |
83 | struct perf_counter_hw_event { | |
84 | __s64 type; | |
85 | ||
86 | __u64 irq_period; | |
87 | __u64 record_type; | |
88 | __u64 read_format; | |
89 | ||
90 | __u64 disabled : 1, /* off by default */ | |
91 | nmi : 1, /* NMI sampling */ | |
92 | raw : 1, /* raw event type */ | |
93 | inherit : 1, /* children inherit it */ | |
94 | pinned : 1, /* must always be on PMU */ | |
95 | exclusive : 1, /* only group on PMU */ | |
96 | exclude_user : 1, /* don't count user */ | |
97 | exclude_kernel : 1, /* ditto kernel */ | |
98 | exclude_hv : 1, /* ditto hypervisor */ | |
99 | exclude_idle : 1, /* don't count when idle */ | |
100 | ||
101 | __reserved_1 : 54; | |
102 | ||
103 | __u32 extra_config_len; | |
104 | __u32 __reserved_4; | |
105 | ||
106 | __u64 __reserved_2; | |
107 | __u64 __reserved_3; | |
108 | }; | |
109 | ||
/*
 * Per-architecture syscall number for sys_perf_counter_open()
 * (not yet exported by libc headers):
 */
#ifdef __x86_64__
# define __NR_perf_counter_open	295
#endif

#ifdef __i386__
# define __NR_perf_counter_open	333
#endif

#ifdef __powerpc__
# define __NR_perf_counter_open	319
#endif

123 | asmlinkage int sys_perf_counter_open( | |
124 | ||
125 | struct perf_counter_hw_event *hw_event_uptr __user, | |
126 | pid_t pid, | |
127 | int cpu, | |
128 | int group_fd, | |
129 | unsigned long flags) | |
130 | { | |
131 | int ret; | |
132 | ||
133 | ret = syscall( | |
134 | __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags); | |
135 | #if defined(__x86_64__) || defined(__i386__) | |
136 | if (ret < 0 && ret > -4096) { | |
137 | errno = -ret; | |
138 | ret = -1; | |
139 | } | |
140 | #endif | |
141 | return ret; | |
142 | } |