/*
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
/*
 * ARMv8 supports a maximum of 32 events.
 * The cycle counter is included in this total.
 */
#define ARMPMU_MAX_HWEVENTS		32
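/*
 * Per-CPU PMU bookkeeping: hw_events[] holds the perf_event installed on
 * each hardware counter, used_mask tracks which counters are allocated,
 * and cpu_hw_events ties the two together for each CPU (the pointers are
 * wired up in cpu_pmu_init() below).
 */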
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;
int
armpmu_get_max_events(void)
{
	int max_events = 0;

	if (cpu_pmu != NULL)
		max_events = cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
	return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);
#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF
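/*
 * PERF_TYPE_HW_CACHE events use the generic perf cache encoding that
 * armpmu_map_cache_event() decodes below: cache type in bits 0-7, the
 * operation in bits 8-15 and the result in bits 16-23. For example, an L1
 * data-cache read miss is C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16).
 */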
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}
static int map_cpu_event(struct perf_event *event,
			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
			 const unsigned (*cache_map)
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX],
			 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
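/*
 * Program the counter so that it overflows after "sample_period" more
 * events: the counter is preloaded with the negated remaining period
 * (modulo the counter width), so the overflow interrupt fires once the
 * period has elapsed.
 */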
static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
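/*
 * Fold the delta since the last read into the event count. The cmpxchg
 * retry loop guards against racing with the overflow interrupt handler
 * updating prev_count, and the delta is masked with max_period to cope
 * with the counter wrapping.
 */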
u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
}
static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
static void
armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event fake_event = event->hw;
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (is_software_event(event))
		return 1;

	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}
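/*
 * Group validation: build a fake PMU with an empty used_mask and check
 * that the leader, all of its siblings and the new event could be
 * scheduled onto hardware counters at the same time.
 */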
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}
static void
armpmu_disable_percpu_irq(void *data)
{
	unsigned int irq = *(unsigned int *)data;
	disable_percpu_irq(irq);
}
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	int irq;
	unsigned int i, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq <= 0)
		return;

	if (irq_is_percpu(irq)) {
		on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &cpu_hw_events);
	} else {
		for (i = 0; i < irqs; ++i) {
			if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq > 0)
				free_irq(irq, armpmu);
		}
	}
}
static void
armpmu_enable_percpu_irq(void *data)
{
	unsigned int irq = *(unsigned int *)data;
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}
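/*
 * Two interrupt models are handled below: a single per-CPU (PPI)
 * interrupt shared by all CPUs, or one interrupt per CPU whose affinity
 * is pinned to that CPU. The per-CPU case is requested with
 * request_percpu_irq() and enabled/disabled on every CPU via on_each_cpu().
 */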
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err, irq;
	unsigned int i, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device) {
		pr_err("no PMU device registered\n");
		return -ENODEV;
	}

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (!irqs) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq <= 0) {
		pr_err("failed to get valid irq for PMU device\n");
		return -ENODEV;
	}

	if (irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, armpmu->handle_irq,
				"arm-pmu", &cpu_hw_events);

		if (err) {
			pr_err("unable to request percpu IRQ%d for ARM PMU counters\n",
				irq);
			armpmu_release_hardware(armpmu);
			return err;
		}

		on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq <= 0)
				continue;

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
					   irq, i);
				continue;
			}

			err = request_irq(irq, armpmu->handle_irq,
					  IRQF_NOBALANCING, "arm-pmu", armpmu);
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
					irq);
				armpmu_release_hardware(armpmu);
				return err;
			}

			cpumask_set_cpu(i, &armpmu->active_irqs);
		}
	}

	return 0;
}
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EPERM;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop();
}
static void __init armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}

int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
{
	armpmu_init(armpmu);
	return perf_pmu_register(&armpmu->pmu, name, type);
}
/*
 * ARMv8 PMUv3 Performance Events handling code.
 *
 * Common event types.
 */
enum armv8_pmuv3_perf_types {
	/* Required events. */
	ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR		= 0x00,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL		= 0x03,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS		= 0x04,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
	ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES		= 0x11,
	ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED		= 0x12,

	/* At least one of the following is required. */
	ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED		= 0x08,
	ARMV8_PMUV3_PERFCTR_OP_SPEC			= 0x1B,

	/* Common architectural events. */
	ARMV8_PMUV3_PERFCTR_MEM_READ			= 0x06,
	ARMV8_PMUV3_PERFCTR_MEM_WRITE			= 0x07,
	ARMV8_PMUV3_PERFCTR_EXC_TAKEN			= 0x09,
	ARMV8_PMUV3_PERFCTR_EXC_EXECUTED		= 0x0A,
	ARMV8_PMUV3_PERFCTR_CID_WRITE			= 0x0B,
	ARMV8_PMUV3_PERFCTR_PC_WRITE			= 0x0C,
	ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH		= 0x0D,
	ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN		= 0x0E,
	ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS	= 0x0F,
	ARMV8_PMUV3_PERFCTR_TTBR_WRITE			= 0x1C,

	/* Common microarchitectural events. */
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL		= 0x01,
	ARMV8_PMUV3_PERFCTR_ITLB_REFILL			= 0x02,
	ARMV8_PMUV3_PERFCTR_DTLB_REFILL			= 0x05,
	ARMV8_PMUV3_PERFCTR_MEM_ACCESS			= 0x13,
	ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS		= 0x14,
	ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB		= 0x15,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS		= 0x16,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL		= 0x17,
	ARMV8_PMUV3_PERFCTR_L2_CACHE_WB			= 0x18,
	ARMV8_PMUV3_PERFCTR_BUS_ACCESS			= 0x19,
	ARMV8_PMUV3_PERFCTR_MEM_ERROR			= 0x1A,
	ARMV8_PMUV3_PERFCTR_BUS_CYCLES			= 0x1D,
};
/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
};
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
/*
 * Perf Events' indices
 */
#define	ARMV8_IDX_CYCLE_COUNTER	0
#define	ARMV8_IDX_COUNTER0	1
#define	ARMV8_IDX_COUNTER_LAST	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define	ARMV8_MAX_COUNTERS	32
#define	ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
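/*
 * Index 0 is the dedicated cycle counter; indices 1..N map onto the N
 * generic event counters, so ARMV8_IDX_TO_COUNTER(ARMV8_IDX_COUNTER0)
 * is hardware event counter 0.
 */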
/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
#define	ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV8_PMCR_N_MASK	0x1f
#define	ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */

/*
 * PMOVSR: counters overflow flag status reg
 */
#define	ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define	ARMV8_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
#define	ARMV8_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv3
 */
#define	ARMV8_EXCLUDE_EL1	(1 << 31)
#define	ARMV8_EXCLUDE_EL0	(1 << 30)
#define	ARMV8_INCLUDE_EL2	(1 << 27)
static inline u32 armv8pmu_pmcr_read(void)
{
	u32 val;
	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
	return val;
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMCR_MASK;
	isb();
	asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_OVERFLOWED_MASK;
}
static inline int armv8pmu_counter_valid(int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST;
}
static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	int ret = 0;
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), idx);
	} else {
		counter = ARMV8_IDX_TO_COUNTER(idx);
		ret = pmnc & BIT(counter);
	}

	return ret;
}
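/*
 * The event counters are accessed indirectly: pmselr_el0 selects a
 * counter, and pmxevcntr_el0/pmxevtyper_el0 then operate on the selected
 * counter, which is why armv8pmu_select_counter() is called before every
 * such access.
 */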
static inline int armv8pmu_select_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
	isb();

	return idx;
}
static inline u32 armv8pmu_read_counter(int idx)
{
	u32 value = 0;

	if (!armv8pmu_counter_valid(idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

	return value;
}
static inline void armv8pmu_write_counter(int idx, u32 value)
{
	if (!armv8pmu_counter_valid(idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
	else if (armv8pmu_select_counter(idx) == idx)
		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	if (armv8pmu_select_counter(idx) == idx) {
		val &= ARMV8_EVTYPE_MASK;
		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
	}
}
static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
	return idx;
}
static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
	return idx;
}
static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
	return idx;
}
static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter;

	if (!armv8pmu_counter_valid(idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return -EINVAL;
	}

	counter = ARMV8_IDX_TO_COUNTER(idx);
	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
	isb();

	return idx;
}
static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

	/* Write to clear flags */
	value &= ARMV8_OVSR_MASK;
	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

	return value;
}
static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	armv8pmu_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_evtype(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_intens(idx);

	armv8pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	armv8pmu_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	cpuc = this_cpu_ptr(&cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static void armv8pmu_start(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv8pmu_stop(void)
{
	unsigned long flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
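/*
 * Counter allocation policy: the architected cycle counter is reserved
 * for the CYCLES event; everything else takes the first free generic
 * event counter, claimed atomically via test_and_set_bit() on used_mask.
 */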
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;
	unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}
/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV8_EXCLUDE_EL0;
	if (attr->exclude_kernel)
		config_base |= ARMV8_EXCLUDE_EL1;
	if (!attr->exclude_hv)
		config_base |= ARMV8_INCLUDE_EL2;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
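/*
 * For example, a user-space-only count (the perf tool's "event:u"
 * modifier, which sets attr.exclude_kernel and attr.exclude_hv) ends up
 * with config_base = ARMV8_EXCLUDE_EL1, so EL1 is filtered out and EL2
 * is not explicitly included.
 */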
static void armv8pmu_reset(void *info)
{
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
		armv8pmu_disable_event(NULL, idx);

	/* Initialize & Reset PMNC: C and P bits. */
	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);

	/* Disable access from userspace. */
	asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
}
static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &armv8_pmuv3_perf_map,
				&armv8_pmuv3_perf_cache_map,
				ARMV8_EVTYPE_EVENT);
}
static struct arm_pmu armv8pmu = {
	.handle_irq		= armv8pmu_handle_irq,
	.enable			= armv8pmu_enable_event,
	.disable		= armv8pmu_disable_event,
	.read_counter		= armv8pmu_read_counter,
	.write_counter		= armv8pmu_write_counter,
	.get_event_idx		= armv8pmu_get_event_idx,
	.start			= armv8pmu_start,
	.stop			= armv8pmu_stop,
	.reset			= armv8pmu_reset,
	.max_period		= (1LLU << 32) - 1,
};
static u32 __init armv8pmu_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
static struct arm_pmu *__init armv8_pmuv3_pmu_init(void)
{
	armv8pmu.name			= "arm/armv8-pmuv3";
	armv8pmu.map_event		= armv8_pmuv3_map_event;
	armv8pmu.num_events		= armv8pmu_read_num_pmnc_events();
	armv8pmu.set_event_filter	= armv8pmu_set_event_filter;
	return &armv8pmu;
}
/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
cpu_pmu_reset(void)
{
	if (cpu_pmu && cpu_pmu->reset)
		return on_each_cpu(cpu_pmu->reset, NULL, 1);
	return 0;
}
arch_initcall(cpu_pmu_reset);
/*
 * PMU platform driver and devicetree bindings.
 */
static const struct of_device_id armpmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3"},
	{},
};

static int armpmu_device_probe(struct platform_device *pdev)
{
	if (!cpu_pmu)
		return -ENODEV;

	cpu_pmu->plat_device = pdev;
	return 0;
}

static struct platform_driver armpmu_driver = {
	.driver		= {
		.name	= "arm-pmu",
		.of_match_table = armpmu_of_device_ids,
	},
	.probe		= armpmu_device_probe,
};

static int __init register_pmu_driver(void)
{
	return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);
static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
	return this_cpu_ptr(&cpu_hw_events);
}
static void __init cpu_pmu_init(struct arm_pmu *armpmu)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
	}
	armpmu->get_hw_events = armpmu_get_cpu_events;
}
static int __init init_hw_perf_events(void)
{
	u64 dfr = read_cpuid(ID_AA64DFR0_EL1);

	switch ((dfr >> 8) & 0xf) {
	case 0x1:	/* PMUv3 */
		cpu_pmu = armv8_pmuv3_pmu_init();
		break;
	}

	if (cpu_pmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
			cpu_pmu->name, cpu_pmu->num_events);
		cpu_pmu_init(cpu_pmu);
		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
	} else {
		pr_info("no hardware support available\n");
	}

	return 0;
}
early_initcall(init_hw_perf_events);
/*
 * Callchain handling code.
 */
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}
1397 * The registers we're interested in are at the end of the variable
1398 * length saved register structure. The fp points at the end of this
1399 * structure so the address of this struct is:
1400 * (struct compat_frame_tail *)(xxx->fp)-1
1402 * This code has been adapted from the ARM OProfile support.
1404 struct compat_frame_tail
{
1405 compat_uptr_t fp
; /* a (struct compat_frame_tail *) in compat mode */
1408 } __attribute__((packed
));
1410 static struct compat_frame_tail __user
*
1411 compat_user_backtrace(struct compat_frame_tail __user
*tail
,
1412 struct perf_callchain_entry
*entry
)
1414 struct compat_frame_tail buftail
;
1417 /* Also check accessibility of one struct frame_tail beyond */
1418 if (!access_ok(VERIFY_READ
, tail
, sizeof(buftail
)))
1421 pagefault_disable();
1422 err
= __copy_from_user_inatomic(&buftail
, tail
, sizeof(buftail
));
1428 perf_callchain_store(entry
, buftail
.lr
);
1431 * Frame pointers should strictly progress back up the stack
1432 * (towards higher addresses).
1434 if (tail
+ 1 >= (struct compat_frame_tail __user
*)
1435 compat_ptr(buftail
.fp
))
1438 return (struct compat_frame_tail __user
*)compat_ptr(buftail
.fp
) - 1;
1440 #endif /* CONFIG_COMPAT */
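/*
 * User-space callchains are produced by walking the frame-pointer chain
 * saved by the compiler. Only sensibly aligned frame pointers are
 * followed: 16-byte alignment for native AArch64 frame records (the
 * "& 0xf" check below) and 4-byte alignment for AArch32 compat frames
 * (the "& 0x3" check).
 */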
void perf_callchain_user(struct perf_callchain_entry *entry,
			 struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->pc);

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];

		while (entry->nr < PERF_MAX_STACK_DEPTH &&
		       tail && !((unsigned long)tail & 0xf))
			tail = user_backtrace(tail, entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
			tail && !((unsigned long)tail & 0x3))
			tail = compat_user_backtrace(tail, entry);
#endif
	}
}
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int callchain_trace(struct stackframe *frame, void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, frame->pc);
	return 0;
}
void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	struct stackframe frame;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	frame.fp = regs->regs[29];
	frame.sp = regs->sp;
	frame.pc = regs->pc;

	walk_stackframe(&frame, callchain_trace, entry);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}