/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		4

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t		lock;	/* per-core: protect structure */
	u64			config;	/* extra MSR config */
	u64			reg;	/* extra MSR number */
	atomic_t		ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events;
	int			n_added;
	int			n_txn;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	unsigned int		group_flag;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;

	/*
	 * AMD specific bits
	 */
	struct amd_nb		*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64			perf_ctr_virt_mask;

	void			*kfree_on_online;
};

#define __EVENT_CONSTRAINT(c, n, m, w, o) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and its counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1)
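
/*
 * Worked example (illustrative, following the comment above): 0x09 has
 * bits 0 and 3 set, so HWEIGHT(0x09) == 2 and c_overlaps expands to
 *
 *	{ { .idxmsk64 = 0x09 }, .code = 0, .cmask = 0,
 *	  .weight = 2, .overlap = 1 }
 *
 * while c_another1 and c_another2 get .overlap = 0.  Only the 0x09 mask
 * needs the hint, since it is not a subset of either 0x07 or 0x38.
 */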

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
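
/*
 * Example (illustrative; the actual tables live in the Intel PMU code):
 * FIXED_EVENT_CONSTRAINT(0x00c0, 0) ties the architectural
 * INST_RETIRED.ANY encoding to fixed counter 0, i.e. an index mask of
 * 1ULL << 32.  The X86_RAW_EVENT_MASK cmask is what rejects events that
 * set the inv/edge/cnt-mask filters listed above, since those cannot be
 * provided by a fixed counter.
 */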

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight; (e)++)
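
/*
 * Usage sketch (a simplified version of the constraint lookup, not a
 * definition from this file): tables are terminated with
 * EVENT_CONSTRAINT_END, whose zero weight stops the iteration.
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 *	return &unconstrained;
 */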

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),		\
	.msr = (ms),		\
	.config_mask = (m),	\
	.valid_mask = (vm),	\
	.idx = EXTRA_REG_##i	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
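
/*
 * Sketch of how a model-specific table is typically built (the table
 * name and the 0xffff valid mask are illustrative; the real tables live
 * in the Intel PMU code):
 *
 *	static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = {
 *		INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *		EVENT_EXTRA_END
 *	};
 *
 * i.e. event 0xb7 (offcore response 0) is matched on its event code only
 * and routed to the EXTRA_REG_RSP_0 slot of the shared-regs table.
 */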

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
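
/*
 * Usage sketch (the values are illustrative): X86_CONFIG() assembles a
 * raw config value from named bit-fields, leaving unnamed fields zero,
 * e.g.
 *
 *	u64 cfg = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
 *
 * builds an event-select word with event code 0xc0, umask 0x01, the
 * invert flag set and a counter mask of 16.
 */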

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);
	void		(*flush_branch_stack)(void);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	int		bts, pebs;
	int		bts_active, pebs_active;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int	er_flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
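
/*
 * Usage sketch (the quirk function name is made up): x86_add_quirk() is
 * called from __init code, typically a model switch in the PMU init
 * path, to chain a model-specific fixup that is run once during boot:
 *
 *	static __init void example_model_quirk(void)
 *	{
 *		x86_pmu.pebs = 0;
 *	}
 *	...
 *	x86_add_quirk(example_model_quirk);
 *
 * The quirk object itself is static __initdata, so the list must only
 * be walked during init.
 */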

#define ERF_NO_HT_SHARING	1
#define ERF_HAS_RSP_1		2

extern struct x86_pmu x86_pmu __read_mostly;

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
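
/*
 * Lookup sketch: a generic cache event maps to a raw event ID as
 *
 *	u64 id = hw_cache_event_ids[C(LL)][C(OP_READ)][C(RESULT_MISS)];
 *
 * with 0 meaning "not supported" and -1 meaning "makes no sense on this
 * CPU", as described above.  hw_cache_extra_regs is indexed the same way
 * for events that also need an extra MSR value.
 */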

u64 x86_perf_event_update(struct perf_event *event);

static inline int x86_pmu_addr_offset(int index)
{
	int offset;

	/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
	alternative_io(ASM_NOP2,
		       "shll $1, %%eax",
		       X86_FEATURE_PERFCTR_CORE,
		       "=a" (offset),
		       "a" (index));

	return offset;
}

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}
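
/*
 * Worked example (illustrative): without X86_FEATURE_PERFCTR_CORE the
 * ASM_NOP2 path is used and the offset equals the index, so counter 1
 * lives at x86_pmu.perfctr + 1.  With PERFCTR_CORE the "shll $1"
 * alternative doubles the index to match the interleaved
 * eventsel/counter MSR layout, so the same counter sits at
 * x86_pmu.perfctr + 2; x86_pmu_config_addr() behaves the same way for
 * the event-select MSRs.
 */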

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}
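
/*
 * Example (illustrative addresses): on 64-bit the kernel occupies the
 * upper half of the address space, so the sign bit doubles as the
 * kernel/user test: kernel_ip(0xffffffff81000000UL) is true while
 * kernel_ip(0x0000000000400000UL) is false.  On 32-bit the boundary is
 * PAGE_OFFSET instead.
 */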

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_ds_init(void);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(void);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

int p4_pmu_init(void);

int p6_pmu_init(void);

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

#endif /* CONFIG_CPU_SUP_INTEL */