xen/PMU: Describe vendor-specific PMU registers

arch/x86/xen/pmu.c
#include <linux/types.h>
#include <linux/interrupt.h>

#include <asm/xen/hypercall.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>

#include "xen-ops.h"
#include "pmu.h"

/* x86_pmu.handle_irq definition */
#include "../kernel/cpu/perf_event.h"


/* Shared page between hypervisor and domain */
static DEFINE_PER_CPU(struct xen_pmu_data *, xenpmu_shared);
#define get_xenpmu_data() per_cpu(xenpmu_shared, smp_processor_id())


/* AMD PMU */
#define F15H_NUM_COUNTERS	6
#define F10H_NUM_COUNTERS	4

static __read_mostly uint32_t amd_counters_base;
static __read_mostly uint32_t amd_ctrls_base;
static __read_mostly int amd_msr_step;
static __read_mostly int k7_counters_mirrored;
static __read_mostly int amd_num_counters;

/* Intel PMU */
#define MSR_TYPE_COUNTER	0
#define MSR_TYPE_CTRL		1
#define MSR_TYPE_GLOBAL		2
#define MSR_TYPE_ARCH_COUNTER	3
#define MSR_TYPE_ARCH_CTRL	4

/* Number of general pmu registers (CPUID leaf 0xa, EAX[15:8]) */
#define PMU_GENERAL_NR_SHIFT	8
#define PMU_GENERAL_NR_BITS	8
#define PMU_GENERAL_NR_MASK	(((1 << PMU_GENERAL_NR_BITS) - 1) \
				 << PMU_GENERAL_NR_SHIFT)

/* Number of fixed pmu registers (CPUID leaf 0xa, EDX[4:0]) */
#define PMU_FIXED_NR_SHIFT	0
#define PMU_FIXED_NR_BITS	5
#define PMU_FIXED_NR_MASK	(((1 << PMU_FIXED_NR_BITS) - 1) \
				 << PMU_FIXED_NR_SHIFT)

/* Alias registers (0x4c1) for full-width writes to PMCs */
#define MSR_PMC_ALIAS_MASK	(~(MSR_IA32_PERFCTR0 ^ MSR_IA32_PMC0))

static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters;

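/*
 * Probe the host PMU once at init. AMD family 15h uses dedicated,
 * interleaved control/counter MSR pairs (hence a step of 2), earlier
 * families the legacy K7 layout; on Intel, CPUID leaf 0xa reports the
 * number of general-purpose and fixed-function counters.
 */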
static void xen_pmu_arch_init(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {

		switch (boot_cpu_data.x86) {
		case 0x15:
			amd_num_counters = F15H_NUM_COUNTERS;
			amd_counters_base = MSR_F15H_PERF_CTR;
			amd_ctrls_base = MSR_F15H_PERF_CTL;
			amd_msr_step = 2;
			k7_counters_mirrored = 1;
			break;
		case 0x10:
		case 0x12:
		case 0x14:
		case 0x16:
		default:
			amd_num_counters = F10H_NUM_COUNTERS;
			amd_counters_base = MSR_K7_PERFCTR0;
			amd_ctrls_base = MSR_K7_EVNTSEL0;
			amd_msr_step = 1;
			k7_counters_mirrored = 0;
			break;
		}
	} else {
		uint32_t eax, ebx, ecx, edx;

		cpuid(0xa, &eax, &ebx, &ecx, &edx);

		intel_num_arch_counters = (eax & PMU_GENERAL_NR_MASK) >>
			PMU_GENERAL_NR_SHIFT;
		intel_num_fixed_counters = (edx & PMU_FIXED_NR_MASK) >>
			PMU_FIXED_NR_SHIFT;
	}
}

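/*
 * Family 15h mirrors the four legacy K7 performance MSRs onto its first
 * four control/counter pairs; translate a K7 address to the family 15h
 * equivalent and pass everything else through unchanged.
 */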
static inline uint32_t get_fam15h_addr(u32 addr)
{
	switch (addr) {
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		return MSR_F15H_PERF_CTR + (addr - MSR_K7_PERFCTR0);
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
		return MSR_F15H_PERF_CTL + (addr - MSR_K7_EVNTSEL0);
	default:
		break;
	}

	return addr;
}

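/*
 * An MSR is a PMU register if it falls in the family 15h range (control
 * and counter MSRs interleaved, two per counter) or in the legacy K7
 * range (event selects followed by counters).
 */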
static inline bool is_amd_pmu_msr(unsigned int msr)
{
	if ((msr >= MSR_F15H_PERF_CTL &&
	     msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||
	    (msr >= MSR_K7_EVNTSEL0 &&
	     msr < MSR_K7_PERFCTR0 + amd_num_counters))
		return true;

	return false;
}

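/*
 * Classify an Intel PMU MSR: control and global registers are matched
 * explicitly, fixed counters and architectural counters/event selects
 * by range. Full-width counter aliases (MSR_IA32_PMC0 at 0x4c1) are
 * folded onto MSR_IA32_PERFCTR0 via MSR_PMC_ALIAS_MASK first.
 */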
static int is_intel_pmu_msr(u32 msr_index, int *type, int *index)
{
	u32 msr_index_pmc;

	switch (msr_index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_IA32_DS_AREA:
	case MSR_IA32_PEBS_ENABLE:
		*type = MSR_TYPE_CTRL;
		return true;

	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*type = MSR_TYPE_GLOBAL;
		return true;

	default:

		if ((msr_index >= MSR_CORE_PERF_FIXED_CTR0) &&
		    (msr_index < MSR_CORE_PERF_FIXED_CTR0 +
				 intel_num_fixed_counters)) {
			*index = msr_index - MSR_CORE_PERF_FIXED_CTR0;
			*type = MSR_TYPE_COUNTER;
			return true;
		}

		if ((msr_index >= MSR_P6_EVNTSEL0) &&
		    (msr_index < MSR_P6_EVNTSEL0 + intel_num_arch_counters)) {
			*index = msr_index - MSR_P6_EVNTSEL0;
			*type = MSR_TYPE_ARCH_CTRL;
			return true;
		}

		msr_index_pmc = msr_index & MSR_PMC_ALIAS_MASK;
		if ((msr_index_pmc >= MSR_IA32_PERFCTR0) &&
		    (msr_index_pmc < MSR_IA32_PERFCTR0 +
				     intel_num_arch_counters)) {
			*type = MSR_TYPE_ARCH_COUNTER;
			*index = msr_index_pmc - MSR_IA32_PERFCTR0;
			return true;
		}
		return false;
	}
}

/* perf callbacks */
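/*
 * Registered with perf in xen_pmu_init() so that samples taken while a
 * guest vCPU was running get attributed to guest context rather than
 * to dom0 itself.
 */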
static int xen_is_in_guest(void)
{
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return 0;
	}

	if (!xen_initial_domain() || (xenpmu_data->domain_id >= DOMID_SELF))
		return 0;

	return 1;
}

static int xen_is_user_mode(void)
{
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return 0;
	}

	if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_PV)
		return (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_USER);
	else
		return !!(xenpmu_data->pmu.r.regs.cpl & 3);
}

static unsigned long xen_get_guest_ip(void)
{
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return 0;
	}

	return xenpmu_data->pmu.r.regs.ip;
}

static struct perf_guest_info_callbacks xen_guest_cbs = {
	.is_in_guest  = xen_is_in_guest,
	.is_user_mode = xen_is_user_mode,
	.get_guest_ip = xen_get_guest_ip,
};

/* Convert registers from Xen's format to Linux's */
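/*
 * perf derives the sampled privilege level from the low (RPL) bits of
 * regs->cs, so fake them up here: for PV samples the level comes from
 * pmu_flags, for HVM samples from the saved cpl field.
 */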
static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
			     struct pt_regs *regs, uint64_t pmu_flags)
{
	regs->ip = xen_regs->ip;
	regs->cs = xen_regs->cs;
	regs->sp = xen_regs->sp;

	if (pmu_flags & PMU_SAMPLE_PV) {
		if (pmu_flags & PMU_SAMPLE_USER)
			regs->cs |= 3;
		else
			regs->cs &= ~3;
	} else {
		if (xen_regs->cpl)
			regs->cs |= 3;
		else
			regs->cs &= ~3;
	}
}

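/*
 * PMU interrupt handler: convert the register snapshot Xen left in the
 * shared page and hand it to the native perf overflow handler
 * (x86_pmu.handle_irq) as if the PMI had been taken locally.
 */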
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
	int ret = IRQ_NONE;
	struct pt_regs regs;
	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();

	if (!xenpmu_data) {
		pr_warn_once("%s: pmudata not initialized\n", __func__);
		return ret;
	}

	xen_convert_regs(&xenpmu_data->pmu.r.regs, &regs,
			 xenpmu_data->pmu.pmu_flags);
	if (x86_pmu.handle_irq(&regs))
		ret = IRQ_HANDLED;

	return ret;
}

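/* True once xen_pmu_init() has set up the shared page for this CPU. */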
bool is_xen_pmu(int cpu)
{
	return (per_cpu(xenpmu_shared, cpu) != NULL);
}

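/*
 * Allocate the page shared with the hypervisor for this CPU and
 * register it with the XENPMU_init hypercall; Xen fills it with sample
 * data on PMU interrupts. Perf callbacks and vendor probing are done
 * once, from CPU 0.
 */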
void xen_pmu_init(int cpu)
{
	int err;
	struct xen_pmu_params xp;
	unsigned long pfn;
	struct xen_pmu_data *xenpmu_data;

	BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE);

	if (xen_hvm_domain())
		return;

	xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL);
	if (!xenpmu_data) {
		pr_err("VPMU init: No memory\n");
		return;
	}
	pfn = virt_to_pfn(xenpmu_data);

	xp.val = pfn_to_mfn(pfn);
	xp.vcpu = cpu;
	xp.version.maj = XENPMU_VER_MAJ;
	xp.version.min = XENPMU_VER_MIN;
	err = HYPERVISOR_xenpmu_op(XENPMU_init, &xp);
	if (err)
		goto fail;

	per_cpu(xenpmu_shared, cpu) = xenpmu_data;

	if (cpu == 0) {
		perf_register_guest_info_callbacks(&xen_guest_cbs);
		xen_pmu_arch_init();
	}

	return;

fail:
	pr_warn_once("Could not initialize VPMU for cpu %d, error %d\n",
		     cpu, err);
	free_pages((unsigned long)xenpmu_data, 0);
}

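/*
 * Counterpart of xen_pmu_init(): tell Xen to stop using the shared
 * page for this vCPU, then free it.
 */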
void xen_pmu_finish(int cpu)
{
	struct xen_pmu_params xp;

	if (xen_hvm_domain())
		return;

	xp.vcpu = cpu;
	xp.version.maj = XENPMU_VER_MAJ;
	xp.version.min = XENPMU_VER_MIN;

	(void)HYPERVISOR_xenpmu_op(XENPMU_finish, &xp);

	free_pages((unsigned long)per_cpu(xenpmu_shared, cpu), 0);
	per_cpu(xenpmu_shared, cpu) = NULL;
}