/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter, reg, enabled, running;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	counter = vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is equal to the value of the counter
	 * register plus the value the perf event has counted.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter & pmc->bitmask;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
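	/*
	 * The shadow register holds only the delta against the running
	 * perf event: store val minus the current logical value, so that
	 * a subsequent read (register + perf count) returns exactly val.
	 */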
	vcpu_sys_reg(vcpu, reg) += (s64)val -
				   kvm_pmu_get_counter_value(vcpu, select_idx);
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg;

	if (pmc->perf_event) {
		counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
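		/*
		 * Snapshot the full logical value (shadow register + perf
		 * count) back into the shadow register, so no ticks are
		 * lost when the perf event is released below.
		 */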
		vcpu_sys_reg(vcpu, reg) = counter;
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
		pmu->pmc[i].idx = i;
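		/* Counters behave as 32-bit until PMCR_EL0.LC widens them. */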
		pmu->pmc[i].bitmask = 0xffffffffUL;
	}
}

/**
 * kvm_pmu_vcpu_destroy - free the perf events of the PMU for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = &pmu->pmc[i];

		if (pmc->perf_event) {
			perf_event_disable(pmc->perf_event);
			perf_event_release_kernel(pmc->perf_event);
			pmc->perf_event = NULL;
		}
	}
}

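/*
 * Compute the bitmask of counters the guest may use: the PMCR_EL0.N event
 * counters (bits [N-1:0]) plus the dedicated cycle counter.
 */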
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter - enable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter - disable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

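/*
 * A counter contributes to the overflow interrupt only when the PMU is
 * globally enabled (PMCR_EL0.E) and the counter is both enabled
 * (PMCNTENSET_EL0) and has its interrupt enabled (PMINTENSET_EL1).
 */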
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}

/**
 * kvm_pmu_overflow_set - set PMU overflow interrupt
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMOVSSET register
 */
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
{
	u64 reg;

	if (val == 0)
		return;

	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
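	/*
	 * If the write actually raises a pending overflow interrupt, kick
	 * the vcpu so the PMU state is re-evaluated on the next guest entry.
	 */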
	reg = kvm_pmu_overflow_status(vcpu);
	if (reg != 0)
		kvm_vcpu_kick(vcpu);
}

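/*
 * Track the PMU overflow line as a level-triggered interrupt: poke the
 * vgic only when the computed level actually changes.
 */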
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level != overflow) {
		pmu->irq_level = overflow;
		kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
				    pmu->irq_num, overflow);
	}
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

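	/*
	 * pmc->idx is the counter's position in the pmu->pmc[] array, so
	 * stepping back idx elements lands on pmc[0]; from there,
	 * container_of() walks up to the enclosing vcpu.
	 */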
	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_perf_overflow - handle the overflow of a perf event
 *
 * When the perf event overflows, set the overflow status of the
 * corresponding counter via kvm_pmu_overflow_set.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;

	kvm_pmu_overflow_set(vcpu, BIT(idx));
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

	enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
		    && (enable & BIT(i))) {
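			/*
			 * The event counters are treated as 32-bit here: a
			 * wrap to zero after the increment is an overflow.
			 */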
			reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			if (!reg)
				kvm_pmu_overflow_set(vcpu, BIT(i));
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u64 mask;
	int i;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter(vcpu,
				vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}

	if (val & ARMV8_PMU_PMCR_LC) {
		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
		pmc->bitmask = 0xffffffffffffffffUL;
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The index of the selected counter
 *
 * When the guest accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter;

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
	/*
	 * The initial sample period (overflow count) of the event: the perf
	 * event overflows once the emulated counter would wrap past its
	 * bitmask.
	 */
	attr.sample_period = (-counter) & pmc->bitmask;

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This ensures a physical PMU is
	 * present and that CONFIG_PERF_EVENTS is enabled.
	 */
	return (perf_num_counters() > 0);
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) ||
	    !kvm_arm_pmu_irq_initialized(vcpu))
		return -ENXIO;

	if (kvm_arm_pmu_v3_ready(vcpu))
		return -EBUSY;

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}

#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)

/*
 * For a given VM the interrupt type must be the same for every vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || vgic_valid_spi(vcpu->kvm, irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}