x86: Add new MSRs and MSR bits used for Intel Skylake PMU support
arch/x86/include/asm/perf_event.h
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC 32
#define INTEL_PMC_MAX_FIXED 3
#define INTEL_PMC_IDX_FIXED 32

#define X86_PMC_IDX_MAX 64

#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
#define MSR_ARCH_PERFMON_PERFCTR1 0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187

#define ARCH_PERFMON_EVENTSEL_EVENT 0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK 0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
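/*
 * A minimal illustrative sketch (not part of this header's ABI): composing
 * an event-select value from the bits above and programming it into the
 * first counter. Event 0x3c with umask 0x00 is the architectural UnHalted
 * Core Cycles event (see ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL below):
 *
 *	u64 config = (0x3c & ARCH_PERFMON_EVENTSEL_EVENT) |
 *		     ((0x00 << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
 *		     ARCH_PERFMON_EVENTSEL_USR |
 *		     ARCH_PERFMON_EVENTSEL_OS |
 *		     ARCH_PERFMON_EVENTSEL_ENABLE;
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, config);
 */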

#define HSW_IN_TX (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT \
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK \
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK \
	(ARCH_PERFMON_EVENTSEL_EVENT | \
	 ARCH_PERFMON_EVENTSEL_UMASK | \
	 ARCH_PERFMON_EVENTSEL_EDGE | \
	 ARCH_PERFMON_EVENTSEL_INV | \
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS \
	(ARCH_PERFMON_EVENTSEL_EDGE | \
	 ARCH_PERFMON_EVENTSEL_INV | \
	 ARCH_PERFMON_EVENTSEL_CMASK | \
	 ARCH_PERFMON_EVENTSEL_ANY | \
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL | \
	 HSW_IN_TX | \
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK \
	(X86_RAW_EVENT_MASK | \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB \
	(AMD64_EVENTSEL_EVENT | \
	 ARCH_PERFMON_EVENTSEL_UMASK)
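/*
 * The *_RAW_EVENT_MASK macros above describe which config bits user space
 * may set via the raw event interface; a minimal validation sketch (the
 * rejection site is assumed here for illustration):
 *
 *	if (event->attr.config & ~X86_RAW_EVENT_MASK)
 *		return -EINVAL;
 */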
#define AMD64_NUM_COUNTERS 4
#define AMD64_NUM_COUNTERS_CORE 6
#define AMD64_NUM_COUNTERS_NB 4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
#define ARCH_PERFMON_EVENTS_COUNT 7
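/*
 * Sketch of how the availability index is consumed (see also union
 * cpuid10_ebx below): CPUID leaf 0xa reports *missing* architectural events
 * as set bits in EBX, so an event is usable when its bit is clear:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *
 *	cpuid(0xa, &eax, &ebx, &ecx, &edx);
 *	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
 *		... UnHalted Core Cycles is supported ...
 */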

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};
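/*
 * A minimal sketch of enumerating the architectural PMU via CPUID leaf 0xa
 * with the unions above (illustrative; the perf core does this once at
 * init time):
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_edx edx;
 *	unsigned int ebx, ecx;
 *
 *	cpuid(0xa, &eax.full, &ebx, &ecx, &edx.full);
 *	pr_info("PMU v%d: %d counters of %d bits, %d fixed counters\n",
 *		eax.split.version_id, eax.split.num_counters,
 *		eax.split.bit_width, edx.split.num_counters_fixed);
 */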

struct x86_pmu_capability {
	int version;
	int num_counters_gp;
	int num_counters_fixed;
	int bit_width_gp;
	int bit_width_fixed;
	unsigned int events_mask;
	int events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
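/*
 * Illustrative sketch (assumes ring 0 and architectural perfmon v2+): each
 * fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
 * within a field, bit 0 enables ring-0 counting, bit 1 ring-3 counting and
 * bit 3 the PMI on overflow. Enabling Instr_Retired.Any (fixed counter 0)
 * for user+kernel with overflow interrupts could look like:
 *
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, 0xbULL << 0);
 *
 * followed by setting bit 32 (INTEL_PMC_IDX_FIXED + 0) in the global
 * control MSR (MSR_CORE_PERF_GLOBAL_CTRL, 0x38f, from <asm/msr-index.h>).
 */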

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61)
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
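/*
 * These bits live in MSR_CORE_PERF_GLOBAL_STATUS (0x38e, see
 * <asm/msr-index.h>); the *_FROZEN bits are among the Skylake additions
 * this change is about. A hedged sketch of an overflow handler consuming
 * them:
 *
 *	u64 status;
 *
 *	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 *	if (status & GLOBAL_STATUS_LBRS_FROZEN)
 *		... LBRs froze on PMI delivery; read them out before
 *		    unfreezing ...
 */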

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES 0x8000001b

/*
 * The IBS_CAPS_* bits mirror the IBS CPUID feature flags (CPUID
 * Fn8000_001B_EAX), except that bit 0 is repurposed to indicate the
 * existence of IBS itself.
 */
#define IBS_CAPS_AVAIL (1U<<0)
#define IBS_CAPS_FETCHSAM (1U<<1)
#define IBS_CAPS_OPSAM (1U<<2)
#define IBS_CAPS_RDWROPCNT (1U<<3)
#define IBS_CAPS_OPCNT (1U<<4)
#define IBS_CAPS_BRNTRGT (1U<<5)
#define IBS_CAPS_OPCNTEXT (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK (1U<<7)
#define IBS_CAPS_OPBRNFUSE (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD (1U<<9)
#define IBS_CAPS_OPDATA4 (1U<<10)

#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
			  | IBS_CAPS_FETCHSAM \
			  | IBS_CAPS_OPSAM)
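/*
 * Minimal usage sketch: get_ibs_caps() (declared further down) returns 0
 * when IBS is absent, otherwise IBS_CAPS_AVAIL plus the feature bits above:
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if (caps & IBS_CAPS_OPCNT)
 *		... sampling by dispatched-op count is available ...
 */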

/*
 * IBS APIC setup
 */
#define IBSCTL 0x1cc
#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK 0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
#define IBS_FETCH_CNT 0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
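/*
 * Illustrative sketch, assuming the IBS fetch control MSR
 * (MSR_AMD64_IBSFETCHCTL in <asm/msr-index.h>): IBS_FETCH_MAX_CNT holds
 * the sampling period scaled down by 16 (the perf code sizes its
 * max_period as IBS_FETCH_MAX_CNT << 4), so a period of 0x10000 fetches
 * would be programmed as:
 *
 *	wrmsrl(MSR_AMD64_IBSFETCHCTL,
 *	       ((0x10000 >> 4) & IBS_FETCH_MAX_CNT) | IBS_FETCH_ENABLE);
 */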

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
#define IBS_OP_MAX_CNT 0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
#define IBS_RIP_INVALID (1ULL<<38)
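/*
 * A similar sketch for op sampling via MSR_AMD64_IBSOPCTL (also in
 * <asm/msr-index.h>): IBS_OP_MAX_CNT is likewise the period divided by 16,
 * and IBS_OP_CNT_CTL switches the counter from cycles to dispatched ops
 * when IBS_CAPS_OPCNT is reported:
 *
 *	wrmsrl(MSR_AMD64_IBSOPCTL,
 *	       ((0x40000 >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE);
 */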

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT (1UL << 3)
#define PERF_EFLAGS_VM (1UL << 5)
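/*
 * Sketch of the intended flow (inferred from the comment above, not a
 * complete implementation): the PEBS fixup code sets PERF_EFLAGS_EXACT in
 * regs->flags once it has rewound to the precise IP, and perf_misc_flags()
 * then reports that to user space:
 *
 *	if (regs->flags & PERF_EFLAGS_EXACT)
 *		misc |= PERF_RECORD_MISC_EXACT_IP;
 */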

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 of regs->flags to pass exact information; see
 * perf_misc_flags and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip) { \
	(regs)->ip = (__ip); \
	(regs)->bp = caller_frame_pointer(); \
	(regs)->cs = __KERNEL_CS; \
	(regs)->flags = 0; \
	asm volatile( \
		_ASM_MOV "%%"_ASM_SP ", %0\n" \
		: "=m" ((regs)->sp) \
		:: "memory" \
	); \
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
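/*
 * Usage sketch, modelled on what a hypervisor such as KVM does around VM
 * entry/exit (the loop body is an assumption for illustration): the PMU
 * hands back an array of MSRs whose host/guest values must be swapped:
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		... load msrs[i].guest into msrs[i].msr on VM entry and
 *		    restore msrs[i].host on VM exit ...
 */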
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
 extern void amd_pmu_enable_virt(void);
 extern void amd_pmu_disable_virt(void);
#else
 static inline void amd_pmu_enable_virt(void) { }
 static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */