/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
static struct platform_device *pmu_device;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
DEFINE_SPINLOCK(pmu_lock);
/*
 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 events counters CNT0..30.
 *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS	33
/* The events for a given CPU. */
struct cpu_hw_events {
	/*
	 * The events that are active on the CPU for the given index. Index 0
	 * is reserved.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

	/*
	 * A 1 bit for an index indicates that the counter is actively being
	 * used.
	 */
	unsigned long		active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static const char *arm_pmu_names[] = {
	[ARM_PERF_PMU_ID_XSCALE1] = "xscale1",
	[ARM_PERF_PMU_ID_XSCALE2] = "xscale2",
	[ARM_PERF_PMU_ID_V6]	  = "v6",
	[ARM_PERF_PMU_ID_V6MP]	  = "v6mpcore",
	[ARM_PERF_PMU_ID_CA8]	  = "ARMv7 Cortex-A8",
	[ARM_PERF_PMU_ID_CA9]	  = "ARMv7 Cortex-A9",
};
struct arm_pmu {
	enum arm_perf_pmu_ids id;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct hw_perf_event *evt, int idx);
	void		(*disable)(struct hw_perf_event *evt, int idx);
	int		(*event_map)(int evt);
	u64		(*raw_event)(u64);
	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
					 struct hw_perf_event *hwc);
	u32		(*read_counter)(int idx);
	void		(*write_counter)(int idx, u32 val);
	void		(*start)(void);
	void		(*stop)(void);
	int		num_events;
	u64		max_period;
};

/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;
enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
	int id = -ENODEV;

	if (armpmu != NULL)
		id = armpmu->id;

	return id;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF
static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
				     [PERF_COUNT_HW_CACHE_OP_MAX]
				     [PERF_COUNT_HW_CACHE_RESULT_MAX];
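
/*
 * Note: each [type][op][result] slot of the map above holds the
 * architecture-specific event number to program for that generic cache
 * event, or CACHE_OP_UNSUPPORTED when the hardware cannot count that
 * combination; the helper below rejects unsupported combinations rather
 * than programming a bogus event.
 */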
static int
armpmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
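
/*
 * Worked example (illustrative): a PERF_TYPE_HW_CACHE config built as
 *   C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 * decodes above to cache_type = L1D, cache_op = OP_READ and
 * cache_result = MISS, selecting armpmu_perf_cache_map[L1D][OP_READ][MISS].
 */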
static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	atomic64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
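
/*
 * Note on the arithmetic above: the counter is programmed with the negated
 * value of 'left' so that it overflows (and raises the PMU interrupt) after
 * exactly 'left' more events; prev_count is seeded with the same value so
 * that armpmu_event_update() attributes the full delta to the event.
 */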
static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	int shift = 64 - 32;
	s64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
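
/*
 * Note: prev_count is updated with atomic64_cmpxchg() and the read is
 * retried on failure, so an interrupt reading the same counter concurrently
 * cannot cause a delta to be counted twice. The shifts by (64 - 32) confine
 * the subtraction to the width of the 32-bit hardware counters.
 */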
static void
armpmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	clear_bit(idx, cpuc->active_mask);
	armpmu->disable(hwc, idx);

	barrier();

	armpmu_event_update(event, hwc, idx);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}
static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
}
static void
armpmu_unthrottle(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were throttled we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}
static int
armpmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	/* Set the period for the event. */
	armpmu_event_set_period(event, hwc, idx);

	/* Enable the event. */
	armpmu->enable(hwc, idx);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	return err;
}
static struct pmu pmu = {
	.enable	    = armpmu_enable,
	.disable    = armpmu_disable,
	.unthrottle = armpmu_unthrottle,
	.read	    = armpmu_read,
};
static int
validate_event(struct cpu_hw_events *cpuc,
	       struct perf_event *event)
{
	struct hw_perf_event fake_event = event->hw;

	if (event->pmu && event->pmu != &pmu)
		return 0;

	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_pmu, event))
		return -ENOSPC;

	return 0;
}
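
/*
 * Note: validation runs against a scratch cpu_hw_events ("fake_pmu") so the
 * group's counter requirements are dry-run through get_event_idx() without
 * disturbing the used_mask of the live per-CPU state.
 */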
static int
armpmu_reserve_hardware(void)
{
	int i, err = -ENODEV, irq;

	pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (IS_ERR(pmu_device)) {
		pr_warning("unable to reserve pmu\n");
		return PTR_ERR(pmu_device);
	}

	init_pmu(ARM_PMU_DEVICE_CPU);

	if (pmu_device->num_resources < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < pmu_device->num_resources; ++i) {
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		err = request_irq(irq, armpmu->handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
				  "armpmu", NULL);
		if (err) {
			pr_warning("unable to request IRQ%d for ARM perf "
				   "counters\n", irq);
			break;
		}
	}

	if (err) {
		for (i = i - 1; i >= 0; --i) {
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, NULL);
		}
		release_pmu(pmu_device);
		pmu_device = NULL;
	}

	return err;
}
static void
armpmu_release_hardware(void)
{
	int i, irq;

	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
		irq = platform_get_irq(pmu_device, i);
		if (irq >= 0)
			free_irq(irq, NULL);
	}
	armpmu->stop();

	release_pmu(pmu_device);
	pmu_device = NULL;
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);

static void
hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
		armpmu_release_hardware();
		mutex_unlock(&pmu_reserve_mutex);
	}
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	/* Decode the generic type into an ARM event identifier. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		mapping = armpmu->event_map(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		mapping = armpmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		mapping = armpmu->raw_event(event->attr.config);
	} else {
		pr_debug("event type %x not supported\n", event->attr.type);
		return -EOPNOTSUPP;
	}

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 * The ARM performance counters are on all of the time so if someone
	 * has asked us for some excludes then we have to fail.
	 */
	if (event->attr.exclude_kernel || event->attr.exclude_user ||
	    event->attr.exclude_hv || event->attr.exclude_idle) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;

	/*
	 * Store the event encoding into the config_base field. config and
	 * event_base are unused as the only 2 things we need to know are
	 * the event mapping and the counter to use. The counter to use is
	 * also the index and the config_base is the event type.
	 */
	hwc->config_base	= (unsigned long)mapping;
	hwc->config		= 0;
	hwc->event_base		= 0;

	if (!hwc->sample_period) {
		hwc->sample_period  = armpmu->max_period;
		hwc->last_period    = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}
const struct pmu *
hw_perf_event_init(struct perf_event *event)
{
	int err = 0;

	if (!armpmu)
		return ERR_PTR(-ENODEV);

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > perf_max_events) {
			atomic_dec(&active_events);
			return ERR_PTR(-ENOSPC);
		}

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = armpmu_reserve_hardware();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return ERR_PTR(err);

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err ? ERR_PTR(err) : &pmu;
}
void
hw_perf_enable(void)
{
	/* Enable all of the perf events on hardware. */
	int idx;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!armpmu)
		return;

	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		if (!event)
			continue;

		armpmu->enable(&event->hw, idx);
	}

	armpmu->start();
}

void
hw_perf_disable(void)
{
	if (armpmu)
		armpmu->stop();
}
/*
 * ARMv6 Performance counter handling code.
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *	  effectively stops the counter from counting.
 *	- disable the counter's interrupt generation (each counter has its
 *	  own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *	- enable the counter's interrupt generation.
 *	- set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */
enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS	= 0x0,
	ARMV6_PERFCTR_IBUF_STALL	= 0x1,
	ARMV6_PERFCTR_DDEP_STALL	= 0x2,
	ARMV6_PERFCTR_ITLB_MISS		= 0x3,
	ARMV6_PERFCTR_DTLB_MISS		= 0x4,
	ARMV6_PERFCTR_BR_EXEC		= 0x5,
	ARMV6_PERFCTR_BR_MISPREDICT	= 0x6,
	ARMV6_PERFCTR_INSTR_EXEC	= 0x7,
	ARMV6_PERFCTR_DCACHE_HIT	= 0x9,
	ARMV6_PERFCTR_DCACHE_ACCESS	= 0xA,
	ARMV6_PERFCTR_DCACHE_MISS	= 0xB,
	ARMV6_PERFCTR_DCACHE_WBACK	= 0xC,
	ARMV6_PERFCTR_SW_PC_CHANGE	= 0xD,
	ARMV6_PERFCTR_MAIN_TLB_MISS	= 0xF,
	ARMV6_PERFCTR_EXPL_D_ACCESS	= 0x10,
	ARMV6_PERFCTR_LSU_FULL_STALL	= 0x11,
	ARMV6_PERFCTR_WBUF_DRAINED	= 0x12,
	ARMV6_PERFCTR_CPU_CYCLES	= 0xFF,
	ARMV6_PERFCTR_NOP		= 0x20,
};

enum armv6_counters {
	ARMV6_CYCLE_COUNTER = 1,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
};
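
/*
 * Note: ARMV6_PERFCTR_NOP (0x20) is the ETMEXTOUT[0] event referred to in
 * the comment above; armv6pmu_disable_event() selects it to park a counter,
 * since that signal never asserts while export to the event bus is off.
 */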
/*
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV6_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV6_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV6_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
};
static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so use the micro misses here and if users
		 * want the main TLB misses they can use a raw counter.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
enum armv6mpcore_perf_types {
	ARMV6MPCORE_PERFCTR_ICACHE_MISS		= 0x0,
	ARMV6MPCORE_PERFCTR_IBUF_STALL		= 0x1,
	ARMV6MPCORE_PERFCTR_DDEP_STALL		= 0x2,
	ARMV6MPCORE_PERFCTR_ITLB_MISS		= 0x3,
	ARMV6MPCORE_PERFCTR_DTLB_MISS		= 0x4,
	ARMV6MPCORE_PERFCTR_BR_EXEC		= 0x5,
	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT	= 0x6,
	ARMV6MPCORE_PERFCTR_BR_MISPREDICT	= 0x7,
	ARMV6MPCORE_PERFCTR_INSTR_EXEC		= 0x8,
	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS	= 0xA,
	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS	= 0xB,
	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS	= 0xC,
	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS	= 0xD,
	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION	= 0xE,
	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE	= 0xF,
	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS	= 0x10,
	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS	= 0x11,
	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL	= 0x12,
	ARMV6MPCORE_PERFCTR_WBUF_DRAINED	= 0x13,
	ARMV6MPCORE_PERFCTR_CPU_CYCLES		= 0xFF,
};
/*
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
};
static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so use the micro misses here and if users
		 * want the main TLB misses they can use a raw counter.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
};
static inline unsigned long
armv6_pmcr_read(void)
{
	u32 val;
	asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
	return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
	asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
}

#define ARMV6_PMCR_ENABLE		(1 << 0)
#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
	 ARMV6_PMCR_CCOUNT_OVERFLOW)
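
/*
 * Note: with the layout above, a single PMCR read/modify/write both selects
 * the event for each counter (bits 20-27 and 12-19) and controls its
 * interrupt enable; the overflow flags (bits 8-10) clear when written back
 * as ones, which is why the IRQ handler simply writes the read value back.
 */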
static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
	return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
				  enum armv6_counters counter)
{
	int ret = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
	else if (ARMV6_COUNTER0 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
	else if (ARMV6_COUNTER1 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return ret;
}
static inline u32
armv6pmu_read_counter(int counter)
{
	unsigned long value = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return value;
}
static inline void
armv6pmu_write_counter(int counter,
		       u32 value)
{
	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
static void
armv6pmu_enable_event(struct hw_perf_event *hwc,
		      int idx)
{
	unsigned long val, mask, evt, flags;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= 0;
		evt	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
			  ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
			  ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
		    void *dev)
{
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!armv6_pmcr_has_overflowed(pmcr))
		return IRQ_NONE;

	regs = get_irq_regs();

	/*
	 * The interrupts are cleared by writing the overflow flags back to
	 * the control register. All of the other bits don't have any effect
	 * if they are rewritten, so write the whole value back.
	 */
	armv6_pmcr_write(pmcr);

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts enabled. For
	 * platforms that can have the PMU interrupts raised as a PMI, this
	 * will not work.
	 */
	perf_event_do_pending();

	return IRQ_HANDLED;
}
static void
armv6pmu_start(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
armv6pmu_stop(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
armv6pmu_event_map(int config)
{
	int mapping = armv6_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static int
armv6mpcore_pmu_event_map(int config)
{
	int mapping = armv6mpcore_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static u64
armv6pmu_raw_event(u64 config)
{
	return config & 0xff;
}
static int
armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
		       struct hw_perf_event *event)
{
	/* Always place a cycle counter into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV6_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * counter0 and counter1.
		 */
		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
			return ARMV6_COUNTER1;

		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
			return ARMV6_COUNTER0;

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
		       int idx)
{
	unsigned long val, mask, evt, flags;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
		evt	= 0;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the number
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
			      int idx)
{
	unsigned long val, mask, flags, evt = 0;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static const struct arm_pmu armv6pmu = {
	.id		= ARM_PERF_PMU_ID_V6,
	.handle_irq	= armv6pmu_handle_irq,
	.enable		= armv6pmu_enable_event,
	.disable	= armv6pmu_disable_event,
	.event_map	= armv6pmu_event_map,
	.raw_event	= armv6pmu_raw_event,
	.read_counter	= armv6pmu_read_counter,
	.write_counter	= armv6pmu_write_counter,
	.get_event_idx	= armv6pmu_get_event_idx,
	.start		= armv6pmu_start,
	.stop		= armv6pmu_stop,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
};
/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */
static const struct arm_pmu armv6mpcore_pmu = {
	.id		= ARM_PERF_PMU_ID_V6MP,
	.handle_irq	= armv6pmu_handle_irq,
	.enable		= armv6pmu_enable_event,
	.disable	= armv6mpcore_pmu_disable_event,
	.event_map	= armv6mpcore_pmu_event_map,
	.raw_event	= armv6pmu_raw_event,
	.read_counter	= armv6pmu_read_counter,
	.write_counter	= armv6pmu_write_counter,
	.get_event_idx	= armv6pmu_get_event_idx,
	.start		= armv6pmu_start,
	.stop		= armv6pmu_stop,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
};
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * Copied from ARMv6 code, with the low level code inspired
 *  by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 *  a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 *  a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 *  counter and all 4 performance counters together can be reset separately.
 */
/* Common ARMv7 event types */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,
	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,
	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
	ARMV7_PERFCTR_DREAD			= 0x06,
	ARMV7_PERFCTR_DWRITE			= 0x07,

	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
	/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,

	ARMV7_PERFCTR_PC_BRANCH_MIS_USED	= 0x12,

	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,

	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,

	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
	ARMV7_PERFCTR_L2_NEON			= 0x4E,
	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
	ARMV7_PERFCTR_L1_INST			= 0x50,
	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,

	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,

	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,

	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,

	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,

	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,

	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES	= 0x80,
	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES	= 0x81,
	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES	= 0x85,
	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES	= 0x86,

	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,

	ARMV7_PERFCTR_ISB_INST			= 0x90,
	ARMV7_PERFCTR_DSB_INST			= 0x91,
	ARMV7_PERFCTR_DMB_INST			= 0x92,
	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,

	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
};
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};
1371 static const unsigned armv7_a8_perf_cache_map
[PERF_COUNT_HW_CACHE_MAX
]
1372 [PERF_COUNT_HW_CACHE_OP_MAX
]
1373 [PERF_COUNT_HW_CACHE_RESULT_MAX
] = {
1376 * The performance counters don't differentiate between read
1377 * and write accesses/misses so this isn't strictly correct,
1378 * but it's the best we can do. Writes and reads get
1382 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1383 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1386 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1387 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1389 [C(OP_PREFETCH
)] = {
1390 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1391 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1396 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L1_INST
,
1397 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L1_INST_MISS
,
1400 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L1_INST
,
1401 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L1_INST_MISS
,
1403 [C(OP_PREFETCH
)] = {
1404 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1405 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1410 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L2_ACCESS
,
1411 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L2_CACH_MISS
,
1414 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L2_ACCESS
,
1415 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L2_CACH_MISS
,
1417 [C(OP_PREFETCH
)] = {
1418 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1419 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1424 * Only ITLB misses and DTLB refills are supported.
1425 * If users want the DTLB refills misses a raw counter
1429 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1430 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1433 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1434 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1436 [C(OP_PREFETCH
)] = {
1437 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1438 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1443 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1444 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1447 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1448 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1450 [C(OP_PREFETCH
)] = {
1451 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1452 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1457 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1459 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1462 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1464 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1466 [C(OP_PREFETCH
)] = {
1467 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1468 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    =
					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_COHERENT_LINE_HIT,
	[PERF_COUNT_HW_CACHE_MISSES]	    = ARMV7_PERFCTR_COHERENT_LINE_MISS,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};
1487 static const unsigned armv7_a9_perf_cache_map
[PERF_COUNT_HW_CACHE_MAX
]
1488 [PERF_COUNT_HW_CACHE_OP_MAX
]
1489 [PERF_COUNT_HW_CACHE_RESULT_MAX
] = {
1492 * The performance counters don't differentiate between read
1493 * and write accesses/misses so this isn't strictly correct,
1494 * but it's the best we can do. Writes and reads get
1498 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1499 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1502 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1503 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1505 [C(OP_PREFETCH
)] = {
1506 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1507 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1512 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1513 [C(RESULT_MISS
)] = ARMV7_PERFCTR_IFETCH_MISS
,
1516 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1517 [C(RESULT_MISS
)] = ARMV7_PERFCTR_IFETCH_MISS
,
1519 [C(OP_PREFETCH
)] = {
1520 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1521 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1526 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1527 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1530 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1531 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1533 [C(OP_PREFETCH
)] = {
1534 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1535 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1540 * Only ITLB misses and DTLB refills are supported.
1541 * If users want the DTLB refills misses a raw counter
1545 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1546 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1549 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1550 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1552 [C(OP_PREFETCH
)] = {
1553 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1554 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1559 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1560 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1563 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1564 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1566 [C(OP_PREFETCH
)] = {
1567 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1568 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1573 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1575 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1578 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1580 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1582 [C(OP_PREFETCH
)] = {
1583 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1584 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
/*
 * Perf Events counters
 */
enum armv7_counters {
	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
	ARMV7_COUNTER0		= 2,	/* First event counter */
};

/*
 * The cycle counter is ARMV7_CYCLE_COUNTER.
 * The first event counter is ARMV7_COUNTER0.
 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 */
#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
/*
 * ARMv7 low level PMNC access
 */

/* Per-CPU PMNC: config reg */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV7_PMNC_N_MASK	0x1f
#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/* Available counters */
#define ARMV7_CNT0		0	/* First event counter */
#define ARMV7_CCNT		31	/* Cycle counter */

/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)

/* CNTENS: counters enable reg */
#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)

/* CNTENC: counters disable reg */
#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)

/* INTENS: counters overflow interrupt enable reg */
#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)

/* INTENC: counters overflow interrupt disable reg */
#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)

/* EVTSEL: Event selection reg */
#define	ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */

/* SELECT: Counter selection reg */
#define	ARMV7_SELECT_MASK	0x1f	/* Mask for writable bits */

/* FLAG: counters overflow flag status reg */
#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
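
/*
 * Note (illustrative): perf counter indices and hardware counters are offset
 * by ARMV7_EVENT_CNT_TO_CNTx, e.g. idx ARMV7_COUNTER0 (2) selects hardware
 * counter CNT0 (0) and thus bit 0 in CNTENS/CNTENC/INTENS/INTENC/FLAG, while
 * the cycle counter always uses the dedicated bit 31 (ARMV7_CCNT).
 */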
static inline unsigned long armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(unsigned long val)
{
	val &= ARMV7_PMNC_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}
static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum armv7_counters counter)
{
	int ret = 0;

	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
		ret = pmnc & ARMV7_FLAG_P(counter);
	else
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), counter);

	return ret;
}
static inline int armv7_pmnc_select_counter(unsigned int idx)
{
	u32 val;

	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
		pr_err("CPU%u selecting wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));

	return idx;
}
static inline u32 armv7pmu_read_counter(int idx)
{
	unsigned long value = 0;

	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mrc p15, 0, %0, c9, c13, 2"
				     : "=r" (value));
	} else
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);

	return value;
}
static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mcr p15, 0, %0, c9, c13, 2"
				     : : "r" (value));
	} else
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
}
static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTSEL_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}
static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENS_C;
	else
		val = ARMV7_CNTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENC_C;
	else
		val = ARMV7_CNTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
			" interrupt enable %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENS_C;
	else
		val = ARMV7_INTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
			" interrupt enable %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENC_C;
	else
		val = ARMV7_INTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
1860 static void armv7_pmnc_dump_regs(void)
1865 printk(KERN_INFO
"PMNC registers dump:\n");
1867 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val
));
1868 printk(KERN_INFO
"PMNC =0x%08x\n", val
);
1870 asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val
));
1871 printk(KERN_INFO
"CNTENS=0x%08x\n", val
);
1873 asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val
));
1874 printk(KERN_INFO
"INTENS=0x%08x\n", val
);
1876 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val
));
1877 printk(KERN_INFO
"FLAGS =0x%08x\n", val
);
1879 asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val
));
1880 printk(KERN_INFO
"SELECT=0x%08x\n", val
);
1882 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val
));
1883 printk(KERN_INFO
"CCNT =0x%08x\n", val
);
1885 for (cnt
= ARMV7_COUNTER0
; cnt
< ARMV7_COUNTER_LAST
; cnt
++) {
1886 armv7_pmnc_select_counter(cnt
);
1887 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val
));
1888 printk(KERN_INFO
"CNT[%d] count =0x%08x\n",
1889 cnt
-ARMV7_EVENT_CNT_TO_CNTx
, val
);
1890 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val
));
1891 printk(KERN_INFO
"CNT[%d] evtsel=0x%08x\n",
1892 cnt
-ARMV7_EVENT_CNT_TO_CNTx
, val
);
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	spin_lock_irqsave(&pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't need to set the event if it's a cycle count
	 */
	if (idx != ARMV7_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Disable counter and interrupt
	 */
	spin_lock_irqsave(&pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts enabled. For
	 * platforms that can have the PMU interrupts raised as a PMI, this
	 * will not work.
	 */
	perf_event_do_pending();

	return IRQ_HANDLED;
}
static void armv7pmu_start(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline int armv7_a8_pmu_event_map(int config)
{
	int mapping = armv7_a8_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static inline int armv7_a9_pmu_event_map(int config)
{
	int mapping = armv7_a9_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}
static u64 armv7pmu_raw_event(u64 config)
{
	return config & 0xff;
}
static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;

	/* Always place a cycle counter into the cycle counter. */
	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * the events counters
		 */
		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
			if (!test_and_set_bit(idx, cpuc->used_mask))
				return idx;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
static struct arm_pmu armv7pmu = {
	.handle_irq	= armv7pmu_handle_irq,
	.enable		= armv7pmu_enable_event,
	.disable	= armv7pmu_disable_event,
	.raw_event	= armv7pmu_raw_event,
	.read_counter	= armv7pmu_read_counter,
	.write_counter	= armv7pmu_write_counter,
	.get_event_idx	= armv7pmu_get_event_idx,
	.start		= armv7pmu_start,
	.stop		= armv7pmu_stop,
	.max_period	= (1LLU << 32) - 1,
};
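
/*
 * Note: unlike the const armv6 descriptors above, this struct is writable
 * and omits .id, .event_map and .num_events; they are presumably filled in
 * at probe time once the exact Cortex-A8/A9 core has been identified, with
 * armv7_reset_read_pmnc() below supplying the counter count.
 */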
static u32 __init armv7_reset_read_pmnc(void)
{
	u32 nb_cnt;

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
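
/*
 * Example (illustrative): a Cortex-A8 reports N = 4 event counters in
 * PMNC[15:11], so this returns 5 once the dedicated cycle counter is added,
 * matching the "1+4 counters" noted at the top of this file.
 */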
/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Based on xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 *	- xscale1pmu: 2 event counters and a cycle counter
 *	- xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */
enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};

enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 1,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
};
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
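
/* PMNC control bits shared by both XScale PMU generations. */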
#define XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define XSCALE_CCNT_RESET	0x004
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008
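
/*
 * Translate a generic perf hardware event into the XScale event number,
 * or report that the event has no hardware equivalent.
 */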
static int
xscalepmu_event_map(int config)
{
	int mapping = xscale_perf_map[config];

	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;

	return mapping;
}
static u64
xscalepmu_raw_event(u64 config)
{
	return config & 0xff;
}
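
/*
 * First-generation XScale PMU: the overflow flags, interrupt enables and
 * per-counter event selects are all packed into the single PMNC register
 * accessed through cp14 below.
 */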
#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* upper 4bits and 7, 11 are write-as-0 */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}
static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}
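
/*
 * PMU overflow interrupt: disable the PMU, account each overflowed
 * counter to its perf event and restart its period, then re-enable.
 */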
static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: there's an A stepping erratum that states if an overflow
	 *       bit already exists and another occurs, the previous
	 *       Overflow bit gets cleared. There's no workaround.
	 *       Fixed in B stepping or later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	perf_event_do_pending();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
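
/*
 * Counter allocation: cycle events get the dedicated CCNT; everything
 * else takes counter 1 first, then counter 0.
 */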
static int
xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
			 struct hw_perf_event *event)
{
	if (XSCALE_PERFCTR_CCNT == event->config_base) {
		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return XSCALE_CYCLE_COUNTER;
	} else {
		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
			return XSCALE_COUNTER1;

		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
			return XSCALE_COUNTER0;

		return -EAGAIN;
	}
}
static void
xscale1pmu_start(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale1pmu_stop(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
xscale1pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}
static inline void
xscale1pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}
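
/* XScale1 PMU: CCNT plus two programmable event counters. */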
static const struct arm_pmu xscale1pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE1,
	.handle_irq	= xscale1pmu_handle_irq,
	.enable		= xscale1pmu_enable_event,
	.disable	= xscale1pmu_disable_event,
	.event_map	= xscalepmu_event_map,
	.raw_event	= xscalepmu_raw_event,
	.read_counter	= xscale1pmu_read_counter,
	.write_counter	= xscale1pmu_write_counter,
	.get_event_idx	= xscale1pmu_get_event_idx,
	.start		= xscale1pmu_start,
	.stop		= xscale1pmu_stop,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
};
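
/*
 * Second-generation XScale PMU: four programmable counters plus CCNT,
 * with the overflow flags, interrupt enables and event selects moved out
 * of PMNC into separate cp14 registers.
 */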
#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}
static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}
static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_int_enable(u32 val)
{
	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}
static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}
static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	perf_event_do_pending();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
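
/*
 * Reuse the XScale1 allocator for CCNT and counters 0/1, then fall back
 * to the extra XScale2 counters (3, then 2).
 */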
static int
xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
			 struct hw_perf_event *event)
{
	int idx = xscale1pmu_get_event_idx(cpuc, event);
	if (idx >= 0)
		goto out;

	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
		idx = XSCALE_COUNTER3;
	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
		idx = XSCALE_COUNTER2;
out:
	return idx;
}
static void
xscale2pmu_start(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale2pmu_stop(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
xscale2pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}
static inline void
xscale2pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}
static const struct arm_pmu xscale2pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE2,
	.handle_irq	= xscale2pmu_handle_irq,
	.enable		= xscale2pmu_enable_event,
	.disable	= xscale2pmu_disable_event,
	.event_map	= xscalepmu_event_map,
	.raw_event	= xscalepmu_raw_event,
	.read_counter	= xscale2pmu_read_counter,
	.write_counter	= xscale2pmu_write_counter,
	.get_event_idx	= xscale2pmu_get_event_idx,
	.start		= xscale2pmu_start,
	.stop		= xscale2pmu_stop,
	.num_events	= 5,
	.max_period	= (1LLU << 32) - 1,
};
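
/*
 * Probe the CPU ID register at boot and select the PMU backend (ARMv6,
 * ARMv7 or XScale) that matches the implementor and part number.
 */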
static int __init
init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);

	/* ARM Ltd CPUs. */
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			armpmu = &armv6pmu;
			memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
			       sizeof(armv6_perf_cache_map));
			perf_max_events	= armv6pmu.num_events;
			break;
		case 0xB020:	/* ARM11mpcore */
			armpmu = &armv6mpcore_pmu;
			memcpy(armpmu_perf_cache_map,
			       armv6mpcore_perf_cache_map,
			       sizeof(armv6mpcore_perf_cache_map));
			perf_max_events = armv6mpcore_pmu.num_events;
			break;
		case 0xC080:	/* Cortex-A8 */
			armv7pmu.id = ARM_PERF_PMU_ID_CA8;
			memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
			       sizeof(armv7_a8_perf_cache_map));
			armv7pmu.event_map = armv7_a8_pmu_event_map;
			armpmu = &armv7pmu;

			/* Reset PMNC and read the nb of CNTx counters
			   supported */
			armv7pmu.num_events = armv7_reset_read_pmnc();
			perf_max_events = armv7pmu.num_events;
			break;
		case 0xC090:	/* Cortex-A9 */
			armv7pmu.id = ARM_PERF_PMU_ID_CA9;
			memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
			       sizeof(armv7_a9_perf_cache_map));
			armv7pmu.event_map = armv7_a9_pmu_event_map;
			armpmu = &armv7pmu;

			/* Reset PMNC and read the nb of CNTx counters
			   supported */
			armv7pmu.num_events = armv7_reset_read_pmnc();
			perf_max_events = armv7pmu.num_events;
			break;
		}
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
			armpmu = &xscale1pmu;
			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
			       sizeof(xscale_perf_cache_map));
			perf_max_events	= xscale1pmu.num_events;
			break;
		case 2:
			armpmu = &xscale2pmu;
			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
			       sizeof(xscale_perf_cache_map));
			perf_max_events	= xscale2pmu.num_events;
			break;
		}
	}

	if (armpmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
			arm_pmu_names[armpmu->id], armpmu->num_events);
	} else {
		pr_info("no hardware support available\n");
		perf_max_events = -1;
	}

	return 0;
}
arch_initcall(init_hw_perf_events);
/*
 * Callchain handling code.
 */
static inline void
callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail	*fp;
	unsigned long		sp;
	unsigned long		lr;
} __attribute__((packed));
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail *
user_backtrace(struct frame_tail *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
static void
perf_callchain_user(struct pt_regs *regs,
		    struct perf_callchain_entry *entry)
{
	struct frame_tail *tail;

	callchain_store(entry, PERF_CONTEXT_USER);

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	tail = (struct frame_tail *)regs->ARM_fp - 1;

	while (tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr, void *data)
{
	struct perf_callchain_entry *entry = data;

	callchain_store(entry, fr->pc);
	return 0;
}
static void
perf_callchain_kernel(struct pt_regs *regs,
		      struct perf_callchain_entry *entry)
{
	struct stackframe fr;

	callchain_store(entry, PERF_CONTEXT_KERNEL);
	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}
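
/*
 * Kernel-mode samples get a kernel callchain; if the interrupted task has
 * a user-space mm, its user callchain is appended as well.
 */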
static void
perf_do_callchain(struct pt_regs *regs,
		  struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || !current->pid)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}
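
/* Per-cpu scratch entry used to build callchains from PMU interrupt context. */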
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);

struct perf_callchain_entry *
perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;
	perf_do_callchain(regs, entry);
	return entry;
}