/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example, Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 *      For instance, the AMD RDPMC instruction uses 0000_0003h in ECX to
 *      access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism,
 *      except that it also supports fixed counters. idx can be used as an
 *      index into the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping relationship
 *      between pmc and perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
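
/*
 * An illustrative sketch of how the three index spaces line up, assuming
 * the usual layout in which INTEL_PMC_IDX_FIXED is 32:
 *
 *   counter                 msr                        idx          pmc
 *   Intel gp counter 0      MSR_IA32_PERFCTR0          0            0
 *   Intel fixed counter 1   MSR_CORE_PERF_FIXED_CTR1   0x40000001   INTEL_PMC_IDX_FIXED + 1
 *   AMD gp counter 3        MSR_K7_PERFCTR3            3            3
 *
 * (For Intel, RDPMC selects a fixed counter by setting bit 30 of idx.)
 */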

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

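/*
 * Overflow callback used when the guest did not request an interrupt on
 * overflow (see pmc_reprogram_counter()): flag the counter for
 * reprogramming, record the overflow in the guest's GLOBAL_STATUS and let
 * the KVM_REQ_PMU handler pick it up on the next entry.
 */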
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

		/*
		 * Inject PMI. If the vcpu was in guest mode during the NMI,
		 * the PMI can be injected on guest-mode re-entry. Otherwise
		 * we can't be sure that the vcpu wasn't executing the hlt
		 * instruction at the time of the vmexit and is not going to
		 * re-enter guest mode until woken up. So we should wake it,
		 * but this is impossible from NMI context. Do it from irq
		 * work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

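/*
 * Create and start the host perf event that backs a guest counter. The
 * event is pinned, counts only while the guest is running (exclude_host),
 * and is set up to overflow exactly when the guest-visible counter would
 * wrap; see the sample_period computation below.
 */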
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp)
		attr.config |= HSW_IN_TX_CHECKPOINTED;

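	/*
	 * The guest counter counts up and overflows when it wraps its
	 * width, so the backing host event needs a period equal to the
	 * number of events left before the wrap. For example (a sketch,
	 * assuming a 48-bit wide counter): with pmc->counter ==
	 * 0xFFFFFFFFFF00, (-pmc->counter) & pmc_bitmask(pmc) == 0x100,
	 * i.e. 256 events until overflow.
	 */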
	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		printk_once("kvm_pmu: event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
							       event_select,
							       unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

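/*
 * ctrl is one 4-bit field of the guest's fixed counter control MSR
 * (IA32_FIXED_CTR_CTRL); a sketch of the usual layout: bit 0 enables
 * counting in ring 0, bit 1 enables counting in rings > 0, and bit 3
 * requests a PMI on overflow. idx is the fixed counter number.
 */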
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;

	pmc_stop_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops->pmu_ops->find_fixed_event(idx),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

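/*
 * Handle KVM_REQ_PMU: reprogram every counter whose bit is set in
 * reprogram_pmi (set, for instance, by the overflow callbacks above).
 */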
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}
}

/* check if idx is a valid index to access PMU */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
}

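/*
 * Emulate RDPMC. Bit 31 of the guest-supplied index requests "fast" mode,
 * which returns only the low 32 bits of the counter.
 */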
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmc *pmc;
	u64 ctr_val;

	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
	if (!pmc)
		return 1;

	ctr_val = pmc_read_counter(pmc);
	if (fast_mode)
		ctr_val = (u32)ctr_val;

	*data = ctr_val;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
}

/*
 * Refresh the PMU configuration. This function is generally called when the
 * underlying settings change (such as a guest VM updating its PMU-related
 * CPUID information), which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops->pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops->pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	kvm_pmu_refresh(vcpu);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}