/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val)						\
do {									\
	unsigned int _msr = (msr);					\
	u64 _val = (val);						\
	trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr),	\
		     (unsigned long long)(_val));			\
	native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32));	\
} while (0)
#endif

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};
/*
 * struct hw_perf_event.flags bits
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING	0x0800 /* use freerunning PEBS */

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		8

/*
 * Flags PEBS can handle without a PMI.
 */
#define PEBS_FREERUNNING_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64-bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t		lock;	/* per-core: protect structure */
	u64			config;	/* extra MSR config */
	u64			reg;	/* extra MSR number */
	atomic_t		ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		16

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		group_flag;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint		*constraint_list; /* in enable order */
	struct intel_excl_cntrs		*excl_cntrs;
	int				excl_thread_id; /* 0 or 1 */

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

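/*
 * As an illustrative example (the real tables live in the per-vendor
 * files), EVENT_CONSTRAINT(0xc0, 0x3, ARCH_PERFMON_EVENTSEL_EVENT)
 * describes an event whose event code must match 0xc0 and which may
 * only run on generic counters 0 and 1 (idxmsk 0x3, hence weight 2).
 */
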
#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)

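/*
 * Fixed counters occupy idxmsk bits 32 and up. For instance, the Intel
 * code pins INST_RETIRED.ANY to fixed counter 0 with
 * FIXED_EVENT_CONSTRAINT(0x00c0, 0).
 */
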
/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0,		\
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0,		\
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n)	\
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0,		\
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)

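/*
 * A sketch of the usual lookup loop (as done for
 * x86_pmu.event_constraints): scan the table until the end marker,
 * returning the first constraint whose code matches the event under
 * its cmask:
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 */
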
/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

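/*
 * For illustration, the Nehalem table maps the offcore response event
 * (event 0xb7, umask 0x01) onto its dedicated MSR roughly like this:
 *
 *	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0)
 *
 * i.e. the event's extra config is written to MSR_OFFCORE_RSP_0 and
 * validated against the 0xffff mask.
 */
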
union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value

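/*
 * X86_CONFIG() assembles a raw PERFEVTSELx value from named fields.
 * For example, the PEBS alias code builds "fewer than 16 instructions
 * retired per cycle" with an inverted count-mask:
 *
 *	u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
 */
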
enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	bool		late_ack;
	unsigned	(*limit_period)(struct perf_event *event, unsigned l);

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	int lbr_callstack_users;
	int lbr_stack_state;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)

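/*
 * Quirks are prepended to a singly-linked list and run once during
 * init; e.g. the Intel setup code registers its SandyBridge erratum
 * handler with x86_add_quirk(intel_sandybridge_quirk).
 */
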
/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

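/*
 * EVENT_ATTR_STR() is how model-specific code exports named events to
 * sysfs, e.g. (from the Nehalem memory-latency support):
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 */
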
extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return x86_pmu.lbr_sel_map &&
	       x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}

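/*
 * When the MSR ranges are contiguous, addr_offset is NULL and the
 * address is simply base + index; with the architectural Intel bases
 * (eventsel 0x186, perfctr 0xc1), x86_pmu_config_addr(1) yields 0x187.
 * CPUs with strided layouts, e.g. AMD Family 15h which interleaves
 * control and counter MSRs, supply an addr_offset() callback instead.
 */
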
int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

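/*
 * The common caller passes ARCH_PERFMON_EVENTSEL_ENABLE as enable_mask;
 * perf_ctr_virt_mask lets the AMD host/guest filtering code strip the
 * Host-Only/Guest-Only bits before the value reaches the control MSR.
 */
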
void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

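/*
 * On 64-bit, kernel addresses live in the upper half of the canonical
 * address space, so bit 63 is set and the value is negative when viewed
 * as a signed long; on 32-bit, anything above PAGE_OFFSET is kernel
 * space.
 */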
static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
	    x86_pmu.intel_cap.pebs_format < 2)
		return true;

	return false;
}

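/*
 * BTS can only stand in for events that trace every branch: the branch
 * instructions event with a fixed period of 1 and frequency mode off.
 */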
static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !event->attr.freq && event->hw.sample_period == 1)
		return true;

	return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}
#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */