[deliverable/linux.git] arch/x86/include/asm/perf_event.h
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC				32
#define X86_PMC_MAX_FIXED				3

#define X86_PMC_IDX_GENERIC				0
#define X86_PMC_IDX_FIXED				32
#define X86_PMC_IDX_MAX					64

#define MSR_ARCH_PERFMON_PERFCTR0			0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL

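/*
 * Illustrative sketch (editor's example, not part of the original
 * header): composing a raw event-select value from the bits above.
 * This encodes unhalted core cycles (event 0x3c, umask 0x00; see
 * ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL below), counted in both user
 * and kernel mode, with the counter enabled. Such a value would be
 * written to MSR_ARCH_PERFMON_EVENTSEL0.
 */
#define EXAMPLE_EVTSEL_CORE_CYCLES				\
	(0x3cULL		/* event select field */	| \
	 ARCH_PERFMON_EVENTSEL_USR				| \
	 ARCH_PERFMON_EVENTSEL_OS				| \
	 ARCH_PERFMON_EVENTSEL_ENABLE)
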
#define AMD_PERFMON_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD_PERFMON_EVENTSEL_HOSTONLY			(1ULL << 41)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_F15H				6
#define AMD64_NUM_COUNTERS_MAX				AMD64_NUM_COUNTERS_F15H

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT	\
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

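/*
 * Illustrative sketch (editor's example, not part of the original
 * header): decoding the architectural PMU enumeration with the unions
 * above. Assumes cpuid() from <asm/processor.h>; the helper name
 * example_pmu_version() is hypothetical.
 */
static inline unsigned int example_pmu_version(void)
{
	union cpuid10_eax eax;
	unsigned int ebx, ecx, edx;

	/* Leaf 0xa enumerates "Architectural Performance Monitoring". */
	cpuid(0x0000000a, &eax.full, &ebx, &ecx, &edx);

	return eax.split.version_id;	/* 0: no architectural PMU */
}
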
struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS	(X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES	(X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define X86_PMC_IDX_FIXED_REF_CYCLES	(X86_PMC_IDX_FIXED + 2)
#define X86_PMC_MSK_FIXED_REF_CYCLES	(1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)

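/*
 * Illustrative sketch (editor's example, not part of the original
 * header): enabling fixed counter 0 (Instr_Retired.Any) via the shared
 * control MSR. Each fixed counter owns a 4-bit field there; bit 0 of
 * the field enables ring-0 counting and bit 1 enables rings 1-3.
 * Assumes wrmsrl() from <asm/msr.h>.
 */
static inline void example_enable_fixed_ctr0(void)
{
	/* 0x3 = count in both kernel and user mode, no PMI. */
	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, 0x3ULL);
}
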
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define X86_PMC_IDX_FIXED_BTS		(X86_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

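/*
 * Illustrative sketch (editor's example, not part of the original
 * header): an IBSFETCHCTL value that arms randomized fetch sampling.
 * The low 16 bits hold the max-count period field; see the AMD APM
 * for the exact count granularity.
 */
#define EXAMPLE_IBS_FETCH_CTL(max_cnt)			\
	(((max_cnt) & IBS_FETCH_MAX_CNT) |		\
	 IBS_FETCH_RAND_EN | IBS_FETCH_ENABLE)
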
/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)

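/*
 * Illustrative sketch (editor's example, not part of the original
 * header): an IBSOPCTL value that arms op sampling. The max-count
 * field is commonly programmed as period/16 (compare the ignored low
 * 4 bits of the current count noted above); consult the AMD APM for
 * the authoritative encoding.
 */
#define EXAMPLE_IBS_OP_CTL(period)			\
	((((period) >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE)
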
#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

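/*
 * Illustrative sketch (editor's example, not part of the original
 * header): probing for IBS op sampling with the capability bits and
 * the get_ibs_caps() interface above. The helper name is hypothetical.
 */
static inline u32 example_ibs_op_supported(void)
{
	return get_ibs_caps() & IBS_CAPS_OPSAM;
}
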
#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
 * This flag is otherwise unused and ABI specified to be 0, so nobody should
 * care what we do with it.
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
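
/*
 * Illustrative sketch (editor's example, not part of the original
 * header): reading the PMU capability snapshot through the interface
 * declared above. The helper name is hypothetical.
 */
static inline int example_num_gp_counters(void)
{
	struct x86_pmu_capability cap;

	perf_get_x86_pmu_capability(&cap);
	return cap.num_counters_gp;
}
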
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */