Commit | Line | Data |
---|---|---|
de0428a7 KW |
1 | /* |
2 | * Performance events x86 architecture header | |
3 | * | |
4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | |
5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | |
6 | * Copyright (C) 2009 Jaswinder Singh Rajput | |
7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | |
8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | |
9 | * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> | |
10 | * Copyright (C) 2009 Google, Inc., Stephane Eranian | |
11 | * | |
12 | * For licensing details see kernel-base/COPYING | |
13 | */ | |
14 | ||
15 | #include <linux/perf_event.h> | |
16 | ||
1c2ac3fd PZ |
17 | #if 0 |
18 | #undef wrmsrl | |
19 | #define wrmsrl(msr, val) \ | |
20 | do { \ | |
21 | unsigned int _msr = (msr); \ | |
22 | u64 _val = (val); \ | |
23 | trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr), \ | |
24 | (unsigned long long)(_val)); \ | |
25 | native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32)); \ | |
26 | } while (0) | |
27 | #endif | |
28 | ||
de0428a7 KW |
29 | /* |
30 | * | NHM/WSM | SNB | | |
31 | * register ------------------------------- | |
32 | * | HT | no HT | HT | no HT | | |
33 | *----------------------------------------- | |
34 | * offcore | core | core | cpu | core | | |
35 | * lbr_sel | core | core | cpu | core | | |
36 | * ld_lat | cpu | core | cpu | core | | |
37 | *----------------------------------------- | |
38 | * | |
39 | * Given that there is a small number of shared regs, | |
40 | * we can pre-allocate their slots in the per-cpu | |
41 | * per-core reg tables. | |
42 | */ | |
43 | enum extra_reg_type { | |
44 | EXTRA_REG_NONE = -1, /* not used */ | |
45 | ||
46 | EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ | |
47 | EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ | |
b36817e8 | 48 | EXTRA_REG_LBR = 2, /* lbr_select */ |
de0428a7 KW |
49 | |
50 | EXTRA_REG_MAX /* number of entries needed */ | |
51 | }; | |
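The enum values double as indices into the pre-allocated per-core slot table mentioned in the comment above; a minimal sketch of that lookup, assuming the struct er_account / struct intel_shared_regs layout defined further below (the helper name is hypothetical):

```c
static inline struct er_account *
example_extra_reg_slot(struct intel_shared_regs *sregs, int idx)
{
	/* idx is one of EXTRA_REG_RSP_0, EXTRA_REG_RSP_1 or EXTRA_REG_LBR */
	return &sregs->regs[idx];
}
```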
52 | ||
53 | struct event_constraint { | |
54 | union { | |
55 | unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | |
56 | u64 idxmsk64; | |
57 | }; | |
58 | u64 code; | |
59 | u64 cmask; | |
60 | int weight; | |
bc1738f6 | 61 | int overlap; |
de0428a7 KW |
62 | }; |
63 | ||
64 | struct amd_nb { | |
65 | int nb_id; /* NorthBridge id */ | |
66 | int refcnt; /* reference count */ | |
67 | struct perf_event *owners[X86_PMC_IDX_MAX]; | |
68 | struct event_constraint event_constraints[X86_PMC_IDX_MAX]; | |
69 | }; | |
70 | ||
71 | /* The maximal number of PEBS events: */ | |
70ab7003 | 72 | #define MAX_PEBS_EVENTS 8 |
de0428a7 KW |
73 | |
74 | /* | |
75 | * A debug store configuration. | |
76 | * | |
77 | * We only support architectures that use 64bit fields. | |
78 | */ | |
79 | struct debug_store { | |
80 | u64 bts_buffer_base; | |
81 | u64 bts_index; | |
82 | u64 bts_absolute_maximum; | |
83 | u64 bts_interrupt_threshold; | |
84 | u64 pebs_buffer_base; | |
85 | u64 pebs_index; | |
86 | u64 pebs_absolute_maximum; | |
87 | u64 pebs_interrupt_threshold; | |
88 | u64 pebs_event_reset[MAX_PEBS_EVENTS]; | |
89 | }; | |
90 | ||
91 | /* | |
92 | * Per register state. | |
93 | */ | |
94 | struct er_account { | |
95 | raw_spinlock_t lock; /* per-core: protect structure */ | |
96 | u64 config; /* extra MSR config */ | |
97 | u64 reg; /* extra MSR number */ | |
98 | atomic_t ref; /* reference count */ | |
99 | }; | |
100 | ||
101 | /* | |
102 | * Per core/cpu state | |
103 | * | |
104 | * Used to coordinate shared registers between HT threads or | |
105 | * among events on a single PMU. | |
106 | */ | |
107 | struct intel_shared_regs { | |
108 | struct er_account regs[EXTRA_REG_MAX]; | |
109 | int refcnt; /* per-core: #HT threads */ | |
110 | unsigned core_id; /* per-core: core id */ | |
111 | }; | |
112 | ||
113 | #define MAX_LBR_ENTRIES 16 | |
114 | ||
115 | struct cpu_hw_events { | |
116 | /* | |
117 | * Generic x86 PMC bits | |
118 | */ | |
119 | struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ | |
120 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | |
121 | unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | |
122 | int enabled; | |
123 | ||
124 | int n_events; | |
125 | int n_added; | |
126 | int n_txn; | |
127 | int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ | |
128 | u64 tags[X86_PMC_IDX_MAX]; | |
129 | struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ | |
130 | ||
131 | unsigned int group_flag; | |
5a425294 | 132 | int is_fake; |
de0428a7 KW |
133 | |
134 | /* | |
135 | * Intel DebugStore bits | |
136 | */ | |
137 | struct debug_store *ds; | |
138 | u64 pebs_enabled; | |
139 | ||
140 | /* | |
141 | * Intel LBR bits | |
142 | */ | |
143 | int lbr_users; | |
144 | void *lbr_context; | |
145 | struct perf_branch_stack lbr_stack; | |
146 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; | |
b36817e8 | 147 | struct er_account *lbr_sel; |
3e702ff6 | 148 | u64 br_sel; |
de0428a7 | 149 | |
144d31e6 GN |
150 | /* |
151 | * Intel host/guest exclude bits | |
152 | */ | |
153 | u64 intel_ctrl_guest_mask; | |
154 | u64 intel_ctrl_host_mask; | |
155 | struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX]; | |
156 | ||
de0428a7 KW |
157 | /* |
158 | * manage shared (per-core, per-cpu) registers | |
159 | * used on Intel NHM/WSM/SNB | |
160 | */ | |
161 | struct intel_shared_regs *shared_regs; | |
162 | ||
163 | /* | |
164 | * AMD specific bits | |
165 | */ | |
1018faa6 JR |
166 | struct amd_nb *amd_nb; |
167 | /* Inverted mask of bits to clear in the perf_ctr ctrl registers */ | |
168 | u64 perf_ctr_virt_mask; | |
de0428a7 KW |
169 | |
170 | void *kfree_on_online; | |
171 | }; | |
172 | ||
bc1738f6 | 173 | #define __EVENT_CONSTRAINT(c, n, m, w, o) {\ |
de0428a7 KW |
174 | { .idxmsk64 = (n) }, \ |
175 | .code = (c), \ | |
176 | .cmask = (m), \ | |
177 | .weight = (w), \ | |
bc1738f6 | 178 | .overlap = (o), \ |
de0428a7 KW |
179 | } |
180 | ||
181 | #define EVENT_CONSTRAINT(c, n, m) \ | |
bc1738f6 RR |
182 | __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0) |
183 | ||
184 | /* | |
185 | * The overlap flag marks event constraints with overlapping counter | |
186 | * masks. This is the case if the counter mask of such an event is not | |
187 | * a subset of any other counter mask of a constraint with an equal or | |
188 | * higher weight, e.g.: | |
189 | * | |
190 | * c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0); | |
191 | * c_another1 = EVENT_CONSTRAINT(0, 0x07, 0); | |
192 | * c_another2 = EVENT_CONSTRAINT(0, 0x38, 0); | |
193 | * | |
194 | * The event scheduler may not select the correct counter in the first | |
195 | * cycle because it needs to know which subsequent events will be | |
196 | * scheduled. It may fail to schedule the events then. So we set the | |
197 | * overlap flag for such constraints to give the scheduler a hint which | |
198 | * events to select for counter rescheduling. | |
199 | * | |
200 | * Care must be taken as the rescheduling algorithm is O(n!) which | |
201 | * will increase scheduling cycles for an over-committed system | |
202 | * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros | |
203 | * and their counter masks must be kept to a minimum. | |
204 | */ | |
205 | #define EVENT_CONSTRAINT_OVERLAP(c, n, m) \ | |
206 | __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1) | |
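A worked illustration of the subset test described above (the macro name is made up for this sketch): counter mask 0x09 is not contained in either of the equal-or-higher-weight masks 0x07 and 0x38, so the first constraint must carry the overlap flag.

```c
/* true iff every counter in mask a is also in mask b */
#define EXAMPLE_MASK_IS_SUBSET(a, b)	(((a) & (b)) == (a))

/*
 * EXAMPLE_MASK_IS_SUBSET(0x09, 0x07) == 0   (0x09 & 0x07 == 0x01)
 * EXAMPLE_MASK_IS_SUBSET(0x09, 0x38) == 0   (0x09 & 0x38 == 0x08)
 */
```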
de0428a7 KW |
207 | |
208 | /* | |
209 | * Constraint on the Event code. | |
210 | */ | |
211 | #define INTEL_EVENT_CONSTRAINT(c, n) \ | |
212 | EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT) | |
213 | ||
214 | /* | |
215 | * Constraint on the Event code + UMask + fixed-mask | |
216 | * | |
217 | * Filter mask used to validate fixed counter events. | |
218 | * The following filters disqualify an event from the fixed counters: | |
219 | * - inv | |
220 | * - edge | |
221 | * - cnt-mask | |
222 | * The other filters are supported by fixed counters. | |
223 | * The any-thread option is supported starting with v3. | |
224 | */ | |
225 | #define FIXED_EVENT_CONSTRAINT(c, n) \ | |
226 | EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK) | |
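Fixed counters sit above the general-purpose counters in the index space, so counter n maps to bit 32+n of idxmsk64. Usage roughly follows the Intel constraint tables elsewhere in this subsystem (the event codes shown are the architectural fixed-counter events):

```c
FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY      -> fixed counter 0, bit 32 */
FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE -> fixed counter 1, bit 33 */
FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF  -> fixed counter 2, bit 34 */
```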
227 | ||
228 | /* | |
229 | * Constraint on the Event code + UMask | |
230 | */ | |
231 | #define INTEL_UEVENT_CONSTRAINT(c, n) \ | |
232 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) | |
233 | ||
234 | #define EVENT_CONSTRAINT_END \ | |
235 | EVENT_CONSTRAINT(0, 0, 0) | |
236 | ||
237 | #define for_each_event_constraint(e, c) \ | |
238 | for ((e) = (c); (e)->weight; (e)++) | |
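Illustrative use of the iterator (the table and loop below are hypothetical): per-model constraint tables are terminated by EVENT_CONSTRAINT_END, whose zero weight ends the loop.

```c
static struct event_constraint example_event_constraints[] = {
	INTEL_EVENT_CONSTRAINT(0xc0, 0x3),	/* hypothetical: event 0xc0 on counters 0-1 */
	EVENT_CONSTRAINT_END
};

/*
 * struct event_constraint *c;
 *
 * for_each_event_constraint(c, example_event_constraints)
 *	... try to match the event against c ...
 */
```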
239 | ||
240 | /* | |
241 | * Extra registers for specific events. | |
242 | * | |
243 | * Some events need large masks and require external MSRs. | |
244 | * Those extra MSRs end up being shared for all events on | |
245 | * a PMU and sometimes between the PMUs of sibling HT threads. | |
246 | * In either case, the kernel needs to handle conflicting | |
247 | * accesses to those extra, shared, regs. The data structure | |
248 | * to manage those registers is stored in cpu_hw_event. | |
249 | */ | |
250 | struct extra_reg { | |
251 | unsigned int event; | |
252 | unsigned int msr; | |
253 | u64 config_mask; | |
254 | u64 valid_mask; | |
255 | int idx; /* per_xxx->regs[] reg index */ | |
256 | }; | |
257 | ||
258 | #define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ | |
259 | .event = (e), \ | |
260 | .msr = (ms), \ | |
261 | .config_mask = (m), \ | |
262 | .valid_mask = (vm), \ | |
263 | .idx = EXTRA_REG_##i \ | |
264 | } | |
265 | ||
266 | #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ | |
267 | EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx) | |
268 | ||
269 | #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0) | |
270 | ||
271 | union perf_capabilities { | |
272 | struct { | |
273 | u64 lbr_format:6; | |
274 | u64 pebs_trap:1; | |
275 | u64 pebs_arch_reg:1; | |
276 | u64 pebs_format:4; | |
277 | u64 smm_freeze:1; | |
278 | }; | |
279 | u64 capabilities; | |
280 | }; | |
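This union is normally populated from MSR_IA32_PERF_CAPABILITIES when the CPU advertises the PDCM feature; a minimal sketch of that probe (not a verbatim copy of the init code):

```c
if (boot_cpu_has(X86_FEATURE_PDCM)) {
	u64 capabilities;

	rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
	x86_pmu.intel_cap.capabilities = capabilities;
}
```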
281 | ||
c1d6f42f PZ |
282 | struct x86_pmu_quirk { |
283 | struct x86_pmu_quirk *next; | |
284 | void (*func)(void); | |
285 | }; | |
286 | ||
f9b4eeb8 PZ |
287 | union x86_pmu_config { |
288 | struct { | |
289 | u64 event:8, | |
290 | umask:8, | |
291 | usr:1, | |
292 | os:1, | |
293 | edge:1, | |
294 | pc:1, | |
295 | interrupt:1, | |
296 | __reserved1:1, | |
297 | en:1, | |
298 | inv:1, | |
299 | cmask:8, | |
300 | event2:4, | |
301 | __reserved2:4, | |
302 | go:1, | |
303 | ho:1; | |
304 | } bits; | |
305 | u64 value; | |
306 | }; | |
307 | ||
308 | #define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value | |
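X86_CONFIG() assembles a raw config value from named bit-fields of the union above via a compound literal; an illustrative use (the field values are chosen only for demonstration):

```c
/* event 0xc0, counter-mask 16, inverted: "fewer than 16 of event 0xc0 per cycle" */
u64 cfg = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
```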
309 | ||
de0428a7 KW |
310 | /* |
311 | * struct x86_pmu - generic x86 pmu | |
312 | */ | |
313 | struct x86_pmu { | |
314 | /* | |
315 | * Generic x86 PMC bits | |
316 | */ | |
317 | const char *name; | |
318 | int version; | |
319 | int (*handle_irq)(struct pt_regs *); | |
320 | void (*disable_all)(void); | |
321 | void (*enable_all)(int added); | |
322 | void (*enable)(struct perf_event *); | |
323 | void (*disable)(struct perf_event *); | |
324 | int (*hw_config)(struct perf_event *event); | |
325 | int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); | |
326 | unsigned eventsel; | |
327 | unsigned perfctr; | |
4c1fd17a | 328 | int (*addr_offset)(int index, bool eventsel); |
0fbdad07 | 329 | int (*rdpmc_index)(int index); |
de0428a7 KW |
330 | u64 (*event_map)(int); |
331 | int max_events; | |
332 | int num_counters; | |
333 | int num_counters_fixed; | |
334 | int cntval_bits; | |
335 | u64 cntval_mask; | |
ffb871bc GN |
336 | union { |
337 | unsigned long events_maskl; | |
338 | unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)]; | |
339 | }; | |
340 | int events_mask_len; | |
de0428a7 KW |
341 | int apic; |
342 | u64 max_period; | |
343 | struct event_constraint * | |
344 | (*get_event_constraints)(struct cpu_hw_events *cpuc, | |
345 | struct perf_event *event); | |
346 | ||
347 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, | |
348 | struct perf_event *event); | |
349 | struct event_constraint *event_constraints; | |
c1d6f42f | 350 | struct x86_pmu_quirk *quirks; |
de0428a7 KW |
351 | int perfctr_second_write; |
352 | ||
0c9d42ed PZ |
353 | /* |
354 | * sysfs attrs | |
355 | */ | |
356 | int attr_rdpmc; | |
641cc938 | 357 | struct attribute **format_attrs; |
0c9d42ed | 358 | |
a4747393 JO |
359 | ssize_t (*events_sysfs_show)(char *page, u64 config); |
360 | ||
0c9d42ed PZ |
361 | /* |
362 | * CPU Hotplug hooks | |
363 | */ | |
de0428a7 KW |
364 | int (*cpu_prepare)(int cpu); |
365 | void (*cpu_starting)(int cpu); | |
366 | void (*cpu_dying)(int cpu); | |
367 | void (*cpu_dead)(int cpu); | |
c93dc84c PZ |
368 | |
369 | void (*check_microcode)(void); | |
d010b332 | 370 | void (*flush_branch_stack)(void); |
de0428a7 KW |
371 | |
372 | /* | |
373 | * Intel Arch Perfmon v2+ | |
374 | */ | |
375 | u64 intel_ctrl; | |
376 | union perf_capabilities intel_cap; | |
377 | ||
378 | /* | |
379 | * Intel DebugStore bits | |
380 | */ | |
597ed953 | 381 | unsigned int bts :1, |
3e0091e2 PZ |
382 | bts_active :1, |
383 | pebs :1, | |
384 | pebs_active :1, | |
385 | pebs_broken :1; | |
de0428a7 KW |
386 | int pebs_record_size; |
387 | void (*drain_pebs)(struct pt_regs *regs); | |
388 | struct event_constraint *pebs_constraints; | |
0780c927 | 389 | void (*pebs_aliases)(struct perf_event *event); |
70ab7003 | 390 | int max_pebs_events; |
de0428a7 KW |
391 | |
392 | /* | |
393 | * Intel LBR | |
394 | */ | |
395 | unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ | |
396 | int lbr_nr; /* hardware stack size */ | |
b36817e8 SE |
397 | u64 lbr_sel_mask; /* LBR_SELECT valid bits */ |
398 | const int *lbr_sel_map; /* lbr_select mappings */ | |
de0428a7 KW |
399 | |
400 | /* | |
401 | * Extra registers for events | |
402 | */ | |
403 | struct extra_reg *extra_regs; | |
404 | unsigned int er_flags; | |
144d31e6 GN |
405 | |
406 | /* | |
407 | * Intel host/guest support (KVM) | |
408 | */ | |
409 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); | |
de0428a7 KW |
410 | }; |
411 | ||
c1d6f42f PZ |
412 | #define x86_add_quirk(func_) \ |
413 | do { \ | |
414 | static struct x86_pmu_quirk __quirk __initdata = { \ | |
415 | .func = func_, \ | |
416 | }; \ | |
417 | __quirk.next = x86_pmu.quirks; \ | |
418 | x86_pmu.quirks = &__quirk; \ | |
419 | } while (0) | |
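Quirks are queued at init time and invoked once by the core code after the PMU has been selected; since __quirk is __initdata, x86_add_quirk() must only be used from __init context. A minimal sketch (the function name is hypothetical):

```c
static __init void example_quirk(void)
{
	/* e.g. adjust x86_pmu fields for a problematic CPU stepping */
}

/* from an __init setup path: x86_add_quirk(example_quirk); */
```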
420 | ||
de0428a7 KW |
421 | #define ERF_NO_HT_SHARING 1 |
422 | #define ERF_HAS_RSP_1 2 | |
423 | ||
424 | extern struct x86_pmu x86_pmu __read_mostly; | |
425 | ||
426 | DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | |
427 | ||
428 | int x86_perf_event_set_period(struct perf_event *event); | |
429 | ||
430 | /* | |
431 | * Generalized hw caching related hw_event table, filled | |
432 | * in on a per model basis. A value of 0 means | |
433 | * 'not supported', -1 means 'hw_event makes no sense on | |
434 | * this CPU', any other value means the raw hw_event | |
435 | * ID. | |
436 | */ | |
437 | ||
438 | #define C(x) PERF_COUNT_HW_CACHE_##x | |
439 | ||
440 | extern u64 __read_mostly hw_cache_event_ids | |
441 | [PERF_COUNT_HW_CACHE_MAX] | |
442 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
443 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | |
444 | extern u64 __read_mostly hw_cache_extra_regs | |
445 | [PERF_COUNT_HW_CACHE_MAX] | |
446 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
447 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | |
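An illustrative lookup using the C() shorthand (not part of the original header), e.g. the raw event ID a given model uses for last-level-cache read misses:

```c
u64 ev = hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_MISS)];

/* ev == 0: not supported; ev == (u64)-1: not sensible on this CPU; otherwise: raw event ID */
```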
448 | ||
449 | u64 x86_perf_event_update(struct perf_event *event); | |
450 | ||
de0428a7 KW |
451 | static inline unsigned int x86_pmu_config_addr(int index) |
452 | { | |
4c1fd17a JS |
453 | return x86_pmu.eventsel + (x86_pmu.addr_offset ? |
454 | x86_pmu.addr_offset(index, true) : index); | |
de0428a7 KW |
455 | } |
456 | ||
457 | static inline unsigned int x86_pmu_event_addr(int index) | |
458 | { | |
4c1fd17a JS |
459 | return x86_pmu.perfctr + (x86_pmu.addr_offset ? |
460 | x86_pmu.addr_offset(index, false) : index); | |
de0428a7 KW |
461 | } |
462 | ||
0fbdad07 JS |
463 | static inline int x86_pmu_rdpmc_index(int index) |
464 | { | |
465 | return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index; | |
466 | } | |
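These wrappers default to a flat 'base + index' MSR layout; a PMU whose counter MSRs are interleaved can override the hooks instead. A minimal sketch assuming a stride-of-two layout (the function name is hypothetical):

```c
static int example_addr_offset(int index, bool eventsel)
{
	/* eventsel and perfctr MSRs alternate, so each index advances by two */
	return index << 1;
}

/* installed at init time: x86_pmu.addr_offset = example_addr_offset; */
```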
467 | ||
de0428a7 KW |
468 | int x86_setup_perfctr(struct perf_event *event); |
469 | ||
470 | int x86_pmu_hw_config(struct perf_event *event); | |
471 | ||
472 | void x86_pmu_disable_all(void); | |
473 | ||
474 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, | |
475 | u64 enable_mask) | |
476 | { | |
1018faa6 JR |
477 | u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); |
478 | ||
de0428a7 KW |
479 | if (hwc->extra_reg.reg) |
480 | wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); | |
1018faa6 | 481 | wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask); |
de0428a7 KW |
482 | } |
483 | ||
484 | void x86_pmu_enable_all(int added); | |
485 | ||
4b4969b1 YZ |
486 | int perf_assign_events(struct event_constraint **constraints, int n, |
487 | int wmin, int wmax, int *assign); | |
de0428a7 KW |
488 | int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); |
489 | ||
490 | void x86_pmu_stop(struct perf_event *event, int flags); | |
491 | ||
492 | static inline void x86_pmu_disable_event(struct perf_event *event) | |
493 | { | |
494 | struct hw_perf_event *hwc = &event->hw; | |
495 | ||
496 | wrmsrl(hwc->config_base, hwc->config); | |
497 | } | |
498 | ||
499 | void x86_pmu_enable_event(struct perf_event *event); | |
500 | ||
501 | int x86_pmu_handle_irq(struct pt_regs *regs); | |
502 | ||
503 | extern struct event_constraint emptyconstraint; | |
504 | ||
505 | extern struct event_constraint unconstrained; | |
506 | ||
3e702ff6 SE |
507 | static inline bool kernel_ip(unsigned long ip) |
508 | { | |
509 | #ifdef CONFIG_X86_32 | |
510 | return ip > PAGE_OFFSET; | |
511 | #else | |
512 | return (long)ip < 0; | |
513 | #endif | |
514 | } | |
515 | ||
d07bdfd3 PZ |
516 | /* |
517 | * Not all PMUs provide the right context information to place the reported IP | |
518 | * into full context. Specifically segment registers are typically not | |
519 | * supplied. | |
520 | * | |
521 | * Assuming the address is a linear address (it is for IBS), we fake the CS and | |
522 | * vm86 mode using the known zero-based code segment and 'fix up' the registers | |
523 | * to reflect this. | |
524 | * | |
525 | * Intel PEBS/LBR appear to typically provide the effective address; there is | |
526 | * not much we can do about that but pray and treat it like a linear address. | |
527 | */ | |
528 | static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip) | |
529 | { | |
530 | regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS; | |
531 | if (regs->flags & X86_VM_MASK) | |
532 | regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK); | |
533 | regs->ip = ip; | |
534 | } | |
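A minimal sketch of a caller (the function name is hypothetical): a PEBS/IBS-style drain handler rewrites the interrupted registers with the hardware-reported sample address before handing the sample to perf.

```c
static void example_fixup_sample_regs(struct pt_regs *regs, u64 sample_ip)
{
	set_linear_ip(regs, sample_ip);
	/* regs->cs and regs->flags now encode kernel vs. user mode for this ip */
}
```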
535 | ||
0bf79d44 | 536 | ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event); |
20550a43 | 537 | ssize_t intel_event_sysfs_show(char *page, u64 config); |
43c032fe | 538 | |
de0428a7 KW |
539 | #ifdef CONFIG_CPU_SUP_AMD |
540 | ||
541 | int amd_pmu_init(void); | |
542 | ||
543 | #else /* CONFIG_CPU_SUP_AMD */ | |
544 | ||
545 | static inline int amd_pmu_init(void) | |
546 | { | |
547 | return 0; | |
548 | } | |
549 | ||
550 | #endif /* CONFIG_CPU_SUP_AMD */ | |
551 | ||
552 | #ifdef CONFIG_CPU_SUP_INTEL | |
553 | ||
554 | int intel_pmu_save_and_restart(struct perf_event *event); | |
555 | ||
556 | struct event_constraint * | |
557 | x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event); | |
558 | ||
559 | struct intel_shared_regs *allocate_shared_regs(int cpu); | |
560 | ||
561 | int intel_pmu_init(void); | |
562 | ||
563 | void init_debug_store_on_cpu(int cpu); | |
564 | ||
565 | void fini_debug_store_on_cpu(int cpu); | |
566 | ||
567 | void release_ds_buffers(void); | |
568 | ||
569 | void reserve_ds_buffers(void); | |
570 | ||
571 | extern struct event_constraint bts_constraint; | |
572 | ||
573 | void intel_pmu_enable_bts(u64 config); | |
574 | ||
575 | void intel_pmu_disable_bts(void); | |
576 | ||
577 | int intel_pmu_drain_bts_buffer(void); | |
578 | ||
579 | extern struct event_constraint intel_core2_pebs_event_constraints[]; | |
580 | ||
581 | extern struct event_constraint intel_atom_pebs_event_constraints[]; | |
582 | ||
583 | extern struct event_constraint intel_nehalem_pebs_event_constraints[]; | |
584 | ||
585 | extern struct event_constraint intel_westmere_pebs_event_constraints[]; | |
586 | ||
587 | extern struct event_constraint intel_snb_pebs_event_constraints[]; | |
588 | ||
20a36e39 SE |
589 | extern struct event_constraint intel_ivb_pebs_event_constraints[]; |
590 | ||
de0428a7 KW |
591 | struct event_constraint *intel_pebs_constraints(struct perf_event *event); |
592 | ||
593 | void intel_pmu_pebs_enable(struct perf_event *event); | |
594 | ||
595 | void intel_pmu_pebs_disable(struct perf_event *event); | |
596 | ||
597 | void intel_pmu_pebs_enable_all(void); | |
598 | ||
599 | void intel_pmu_pebs_disable_all(void); | |
600 | ||
601 | void intel_ds_init(void); | |
602 | ||
603 | void intel_pmu_lbr_reset(void); | |
604 | ||
605 | void intel_pmu_lbr_enable(struct perf_event *event); | |
606 | ||
607 | void intel_pmu_lbr_disable(struct perf_event *event); | |
608 | ||
609 | void intel_pmu_lbr_enable_all(void); | |
610 | ||
611 | void intel_pmu_lbr_disable_all(void); | |
612 | ||
613 | void intel_pmu_lbr_read(void); | |
614 | ||
615 | void intel_pmu_lbr_init_core(void); | |
616 | ||
617 | void intel_pmu_lbr_init_nhm(void); | |
618 | ||
619 | void intel_pmu_lbr_init_atom(void); | |
620 | ||
c5cc2cd9 SE |
621 | void intel_pmu_lbr_init_snb(void); |
622 | ||
60ce0fbd SE |
623 | int intel_pmu_setup_lbr_filter(struct perf_event *event); |
624 | ||
de0428a7 KW |
625 | int p4_pmu_init(void); |
626 | ||
627 | int p6_pmu_init(void); | |
628 | ||
e717bf4e VW |
629 | int knc_pmu_init(void); |
630 | ||
de0428a7 KW |
631 | #else /* CONFIG_CPU_SUP_INTEL */ |
632 | ||
633 | static inline void reserve_ds_buffers(void) | |
634 | { | |
635 | } | |
636 | ||
637 | static inline void release_ds_buffers(void) | |
638 | { | |
639 | } | |
640 | ||
641 | static inline int intel_pmu_init(void) | |
642 | { | |
643 | return 0; | |
644 | } | |
645 | ||
646 | static inline struct intel_shared_regs *allocate_shared_regs(int cpu) | |
647 | { | |
648 | return NULL; | |
649 | } | |
650 | ||
651 | #endif /* CONFIG_CPU_SUP_INTEL */ |