/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
static struct platform_device *pmu_device;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
DEFINE_SPINLOCK(pmu_lock);
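/*
 * Illustrative only (not part of the original source): the serialized
 * read/modify/write sequence that pmu_lock protects typically looks like
 *
 *	spin_lock_irqsave(&pmu_lock, flags);
 *	val = armv6_pmcr_read();
 *	val |= ARMV6_PMCR_ENABLE;
 *	armv6_pmcr_write(val);
 *	spin_unlock_irqrestore(&pmu_lock, flags);
 *
 * as done by the per-architecture enable/disable helpers below.
 */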
/*
 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 event counters CNT0..30.
 *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS		33
/* The events for a given CPU. */
struct cpu_hw_events {
	/*
	 * The events that are active on the CPU for the given index. Index 0
	 * is reserved.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

	/*
	 * A 1 bit for an index indicates that the counter is actively being
	 * used.
	 */
	unsigned long		active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct arm_pmu {
	enum arm_perf_pmu_ids id;
	const char	*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct hw_perf_event *evt, int idx);
	void		(*disable)(struct hw_perf_event *evt, int idx);
	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
					 struct hw_perf_event *hwc);
	u32		(*read_counter)(int idx);
	void		(*write_counter)(int idx, u32 val);
	void		(*start)(void);
	void		(*stop)(void);
	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
				    [PERF_COUNT_HW_CACHE_OP_MAX]
				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
	u32		raw_event_mask;
	int		num_events;
	u64		max_period;
};

/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;
enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
	int id = -ENODEV;

	if (armpmu != NULL)
		id = armpmu->id;

	return id;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

int
armpmu_get_max_events(void)
{
	int max_events = 0;

	if (armpmu != NULL)
		max_events = armpmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
	return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);
#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF

static int
armpmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
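/*
 * Illustrative only (not part of the original source): a PERF_TYPE_HW_CACHE
 * config packs the cache type in bits 0-7, the operation in bits 8-15 and
 * the result in bits 16-23. For example,
 * (C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)) selects the L1
 * data-cache read-miss entry of the cache_map.
 */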
static int
armpmu_map_event(u64 config)
{
	int mapping = (*armpmu->event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
}

static int
armpmu_map_raw_event(u64 config)
{
	return (int)(config & armpmu->raw_event_mask);
}
static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
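/*
 * Illustrative note (not from the original source): programming the counter
 * with -left means the hardware overflows after exactly 'left' increments.
 * For example, with left == 1000 the counter is written as 0xfffffc18 and
 * the overflow interrupt fires once 1000 events have been counted, i.e.
 * when the next sample period expires.
 */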
static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	int shift = 64 - 32;
	s64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
}
static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!armpmu)
		return;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
static void
armpmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!armpmu)
		return;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	clear_bit(idx, cpuc->active_mask);
	armpmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
static struct pmu pmu;

static int
validate_event(struct cpu_hw_events *cpuc,
	       struct perf_event *event)
{
	struct hw_perf_event fake_event = event->hw;

	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_pmu, event))
		return -ENOSPC;

	return 0;
}
static int
armpmu_reserve_hardware(void)
{
	int i, err = -ENODEV, irq;

	pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (IS_ERR(pmu_device)) {
		pr_warning("unable to reserve pmu\n");
		return PTR_ERR(pmu_device);
	}

	init_pmu(ARM_PMU_DEVICE_CPU);

	if (pmu_device->num_resources < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < pmu_device->num_resources; ++i) {
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		err = request_irq(irq, armpmu->handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
				  "armpmu", NULL);
		if (err) {
			pr_warning("unable to request IRQ%d for ARM perf "
				   "counters\n", irq);
			break;
		}
	}

	if (err) {
		for (i = i - 1; i >= 0; --i) {
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, NULL);
		}
		release_pmu(pmu_device);
		pmu_device = NULL;
	}

	return err;
}
static void
armpmu_release_hardware(void)
{
	int i, irq;

	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
		irq = platform_get_irq(pmu_device, i);
		if (irq >= 0)
			free_irq(irq, NULL);
	}
	armpmu->stop();

	release_pmu(pmu_device);
	pmu_device = NULL;
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);

static void
hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
		armpmu_release_hardware();
		mutex_unlock(&pmu_reserve_mutex);
	}
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	/* Decode the generic type into an ARM event identifier. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		mapping = armpmu_map_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		mapping = armpmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		mapping = armpmu_map_raw_event(event->attr.config);
	} else {
		pr_debug("event type %x not supported\n", event->attr.type);
		return -EOPNOTSUPP;
	}

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 * The ARM performance counters are on all of the time so if someone
	 * has asked us for some excludes then we have to fail.
	 */
	if (event->attr.exclude_kernel || event->attr.exclude_user ||
	    event->attr.exclude_hv || event->attr.exclude_idle) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;

	/*
	 * Store the event encoding into the config_base field. config and
	 * event_base are unused as the only 2 things we need to know are
	 * the event mapping and the counter to use. The counter to use is
	 * also the index and the config_base is the event type.
	 */
	hwc->config_base = (unsigned long)mapping;
	hwc->config = 0;
	hwc->event_base = 0;

	if (!hwc->sample_period) {
		hwc->sample_period  = armpmu->max_period;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}
static int armpmu_event_init(struct perf_event *event)
{
	int err = 0;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!armpmu)
		return -ENODEV;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > armpmu->num_events) {
			atomic_dec(&active_events);
			return -ENOSPC;
		}

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = armpmu_reserve_hardware();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static void armpmu_enable(struct pmu *pmu)
{
	/* Enable all of the perf events on hardware. */
	int idx;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!armpmu)
		return;

	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		if (!event)
			continue;

		armpmu->enable(&event->hw, idx);
	}

	armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
	if (armpmu)
		armpmu->stop();
}

static struct pmu pmu = {
	.pmu_enable	= armpmu_enable,
	.pmu_disable	= armpmu_disable,
	.event_init	= armpmu_event_init,
	.add		= armpmu_add,
	.del		= armpmu_del,
	.start		= armpmu_start,
	.stop		= armpmu_stop,
	.read		= armpmu_read,
};
/*
 * ARMv6 Performance counter handling code.
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *	  effectively stops the counter from counting.
 *	- disable the counter's interrupt generation (each counter has its
 *	  own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *	- enable the counter's interrupt generation.
 *	- set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */
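/*
 * Illustrative only (not part of the original source): following the
 * procedure above, disabling counter 0 amounts to one read/modify/write of
 * the PMCR under pmu_lock, roughly
 *
 *	mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
 *	evt  = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
 *	val  = (armv6_pmcr_read() & ~mask) | evt;
 *	armv6_pmcr_write(val);
 *
 * which is what armv6pmu_disable_event() does below.
 */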
enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS	    = 0x0,
	ARMV6_PERFCTR_IBUF_STALL	    = 0x1,
	ARMV6_PERFCTR_DDEP_STALL	    = 0x2,
	ARMV6_PERFCTR_ITLB_MISS		    = 0x3,
	ARMV6_PERFCTR_DTLB_MISS		    = 0x4,
	ARMV6_PERFCTR_BR_EXEC		    = 0x5,
	ARMV6_PERFCTR_BR_MISPREDICT	    = 0x6,
	ARMV6_PERFCTR_INSTR_EXEC	    = 0x7,
	ARMV6_PERFCTR_DCACHE_HIT	    = 0x9,
	ARMV6_PERFCTR_DCACHE_ACCESS	    = 0xA,
	ARMV6_PERFCTR_DCACHE_MISS	    = 0xB,
	ARMV6_PERFCTR_DCACHE_WBACK	    = 0xC,
	ARMV6_PERFCTR_SW_PC_CHANGE	    = 0xD,
	ARMV6_PERFCTR_MAIN_TLB_MISS	    = 0xF,
	ARMV6_PERFCTR_EXPL_D_ACCESS	    = 0x10,
	ARMV6_PERFCTR_LSU_FULL_STALL	    = 0x11,
	ARMV6_PERFCTR_WBUF_DRAINED	    = 0x12,
	ARMV6_PERFCTR_CPU_CYCLES	    = 0xFF,
	ARMV6_PERFCTR_NOP		    = 0x20,
};

enum armv6_counters {
	ARMV6_CYCLE_COUNTER = 1,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
};
/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV6_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV6_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV6_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
};

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
687 * The performance counters don't differentiate between read
688 * and write accesses/misses so this isn't strictly correct,
689 * but it's the best we can do. Writes and reads get
693 [C(RESULT_ACCESS
)] = ARMV6_PERFCTR_DCACHE_ACCESS
,
694 [C(RESULT_MISS
)] = ARMV6_PERFCTR_DCACHE_MISS
,
697 [C(RESULT_ACCESS
)] = ARMV6_PERFCTR_DCACHE_ACCESS
,
698 [C(RESULT_MISS
)] = ARMV6_PERFCTR_DCACHE_MISS
,
701 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
702 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
707 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
708 [C(RESULT_MISS
)] = ARMV6_PERFCTR_ICACHE_MISS
,
711 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
712 [C(RESULT_MISS
)] = ARMV6_PERFCTR_ICACHE_MISS
,
715 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
716 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
721 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
722 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
725 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
726 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
729 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
730 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
735 * The ARM performance counters can count micro DTLB misses,
736 * micro ITLB misses and main TLB misses. There isn't an event
737 * for TLB misses, so use the micro misses here and if users
738 * want the main TLB misses they can use a raw counter.
741 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
742 [C(RESULT_MISS
)] = ARMV6_PERFCTR_DTLB_MISS
,
745 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
746 [C(RESULT_MISS
)] = ARMV6_PERFCTR_DTLB_MISS
,
749 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
750 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
755 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
756 [C(RESULT_MISS
)] = ARMV6_PERFCTR_ITLB_MISS
,
759 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
760 [C(RESULT_MISS
)] = ARMV6_PERFCTR_ITLB_MISS
,
763 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
764 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
769 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
770 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
773 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
774 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
777 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
778 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
enum armv6mpcore_perf_types {
	ARMV6MPCORE_PERFCTR_ICACHE_MISS	    = 0x0,
	ARMV6MPCORE_PERFCTR_IBUF_STALL	    = 0x1,
	ARMV6MPCORE_PERFCTR_DDEP_STALL	    = 0x2,
	ARMV6MPCORE_PERFCTR_ITLB_MISS	    = 0x3,
	ARMV6MPCORE_PERFCTR_DTLB_MISS	    = 0x4,
	ARMV6MPCORE_PERFCTR_BR_EXEC	    = 0x5,
	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
	ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
	ARMV6MPCORE_PERFCTR_INSTR_EXEC	    = 0x8,
	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
	ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
	ARMV6MPCORE_PERFCTR_CPU_CYCLES	    = 0xFF,
};
/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
};

static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
827 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS
,
829 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS
,
833 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS
,
835 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS
,
838 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
839 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
844 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
845 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS
,
848 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
849 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS
,
852 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
853 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
858 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
859 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
862 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
863 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
866 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
867 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
872 * The ARM performance counters can count micro DTLB misses,
873 * micro ITLB misses and main TLB misses. There isn't an event
874 * for TLB misses, so use the micro misses here and if users
875 * want the main TLB misses they can use a raw counter.
878 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
879 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_DTLB_MISS
,
882 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
883 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_DTLB_MISS
,
886 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
887 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
892 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
893 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_ITLB_MISS
,
896 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
897 [C(RESULT_MISS
)] = ARMV6MPCORE_PERFCTR_ITLB_MISS
,
900 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
901 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
906 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
907 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
910 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
911 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
914 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
915 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
static inline unsigned long
armv6_pmcr_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
	return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
	asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
}

#define ARMV6_PMCR_ENABLE		(1 << 0)
#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
	 ARMV6_PMCR_CCOUNT_OVERFLOW)
static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
	return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK);
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
				  enum armv6_counters counter)
{
	int ret = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
	else if (ARMV6_COUNTER0 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
	else if (ARMV6_COUNTER1 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return ret;
}
static inline u32
armv6pmu_read_counter(int counter)
{
	unsigned long value = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return value;
}

static inline void
armv6pmu_write_counter(int counter,
		       u32 value)
{
	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
static void
armv6pmu_enable_event(struct hw_perf_event *hwc,
		      int idx)
{
	unsigned long val, mask, evt, flags;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= 0;
		evt	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
			  ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
			  ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
		    void *dev)
{
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!armv6_pmcr_has_overflowed(pmcr))
		return IRQ_NONE;

	regs = get_irq_regs();

	/*
	 * The interrupts are cleared by writing the overflow flags back to
	 * the control register. All of the other bits don't have any effect
	 * if they are rewritten, so write the whole value back.
	 */
	armv6_pmcr_write(pmcr);

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static void
armv6pmu_start(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
armv6pmu_stop(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
		       struct hw_perf_event *event)
{
	/* Always place a cycle counter into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV6_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * counter0 and counter1.
		 */
		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
			return ARMV6_COUNTER1;
		}

		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
			return ARMV6_COUNTER0;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
		       int idx)
{
	unsigned long val, mask, evt, flags;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
		evt	= 0;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the number
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
			      int idx)
{
	unsigned long val, mask, flags, evt = 0;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static const struct arm_pmu armv6pmu = {
	.id			= ARM_PERF_PMU_ID_V6,
	.name			= "v6",
	.handle_irq		= armv6pmu_handle_irq,
	.enable			= armv6pmu_enable_event,
	.disable		= armv6pmu_disable_event,
	.read_counter		= armv6pmu_read_counter,
	.write_counter		= armv6pmu_write_counter,
	.get_event_idx		= armv6pmu_get_event_idx,
	.start			= armv6pmu_start,
	.stop			= armv6pmu_stop,
	.cache_map		= &armv6_perf_cache_map,
	.event_map		= &armv6_perf_map,
	.raw_event_mask		= 0xFF,
	.num_events		= 3,
	.max_period		= (1LLU << 32) - 1,
};

const struct arm_pmu *__init armv6pmu_init(void)
{
	return &armv6pmu;
}
/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */
static const struct arm_pmu armv6mpcore_pmu = {
	.id			= ARM_PERF_PMU_ID_V6MP,
	.name			= "v6mpcore",
	.handle_irq		= armv6pmu_handle_irq,
	.enable			= armv6pmu_enable_event,
	.disable		= armv6mpcore_pmu_disable_event,
	.read_counter		= armv6pmu_read_counter,
	.write_counter		= armv6pmu_write_counter,
	.get_event_idx		= armv6pmu_get_event_idx,
	.start			= armv6pmu_start,
	.stop			= armv6pmu_stop,
	.cache_map		= &armv6mpcore_perf_cache_map,
	.event_map		= &armv6mpcore_perf_map,
	.raw_event_mask		= 0xFF,
	.num_events		= 3,
	.max_period		= (1LLU << 32) - 1,
};

const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
	return &armv6mpcore_pmu;
}
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 OProfile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */
/* Common ARMv7 event types */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,
	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,
	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
	ARMV7_PERFCTR_DREAD			= 0x06,
	ARMV7_PERFCTR_DWRITE			= 0x07,

	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,

	ARMV7_PERFCTR_PC_BRANCH_MIS_USED	= 0x12,

	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,

	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,

	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
	ARMV7_PERFCTR_L2_NEON			= 0x4E,
	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
	ARMV7_PERFCTR_L1_INST			= 0x50,
	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,

	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,

	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,

	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,

	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,

	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,

	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES	= 0x80,
	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES	= 0x81,
	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES	= 0x85,
	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES	= 0x86,

	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,

	ARMV7_PERFCTR_ISB_INST			= 0x90,
	ARMV7_PERFCTR_DSB_INST			= 0x91,
	ARMV7_PERFCTR_DMB_INST			= 0x92,
	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,

	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
};
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1428 * The performance counters don't differentiate between read
1429 * and write accesses/misses so this isn't strictly correct,
1430 * but it's the best we can do. Writes and reads get
1434 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1435 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1438 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1439 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1441 [C(OP_PREFETCH
)] = {
1442 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1443 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1448 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L1_INST
,
1449 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L1_INST_MISS
,
1452 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L1_INST
,
1453 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L1_INST_MISS
,
1455 [C(OP_PREFETCH
)] = {
1456 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1457 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1462 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L2_ACCESS
,
1463 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L2_CACH_MISS
,
1466 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_L2_ACCESS
,
1467 [C(RESULT_MISS
)] = ARMV7_PERFCTR_L2_CACH_MISS
,
1469 [C(OP_PREFETCH
)] = {
1470 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1471 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1476 * Only ITLB misses and DTLB refills are supported.
1477 * If users want the DTLB refills misses a raw counter
1481 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1482 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1485 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1486 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1488 [C(OP_PREFETCH
)] = {
1489 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1490 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1495 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1496 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1499 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1500 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1502 [C(OP_PREFETCH
)] = {
1503 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1504 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1509 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1511 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1514 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1516 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1518 [C(OP_PREFETCH
)] = {
1519 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1520 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    =
					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_COHERENT_LINE_HIT,
	[PERF_COUNT_HW_CACHE_MISSES]	    = ARMV7_PERFCTR_COHERENT_LINE_MISS,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1544 * The performance counters don't differentiate between read
1545 * and write accesses/misses so this isn't strictly correct,
1546 * but it's the best we can do. Writes and reads get
1550 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1551 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1554 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_DCACHE_ACCESS
,
1555 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DCACHE_REFILL
,
1557 [C(OP_PREFETCH
)] = {
1558 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1559 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1564 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1565 [C(RESULT_MISS
)] = ARMV7_PERFCTR_IFETCH_MISS
,
1568 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1569 [C(RESULT_MISS
)] = ARMV7_PERFCTR_IFETCH_MISS
,
1571 [C(OP_PREFETCH
)] = {
1572 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1573 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1578 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1579 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1582 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1583 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1585 [C(OP_PREFETCH
)] = {
1586 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1587 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1592 * Only ITLB misses and DTLB refills are supported.
1593 * If users want the DTLB refills misses a raw counter
1597 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1598 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1601 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1602 [C(RESULT_MISS
)] = ARMV7_PERFCTR_DTLB_REFILL
,
1604 [C(OP_PREFETCH
)] = {
1605 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1606 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1611 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1612 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1615 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1616 [C(RESULT_MISS
)] = ARMV7_PERFCTR_ITLB_MISS
,
1618 [C(OP_PREFETCH
)] = {
1619 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1620 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
1625 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1627 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1630 [C(RESULT_ACCESS
)] = ARMV7_PERFCTR_PC_WRITE
,
1632 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED
,
1634 [C(OP_PREFETCH
)] = {
1635 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
1636 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
/*
 * Perf Events counters
 */
enum armv7_counters {
	ARMV7_CYCLE_COUNTER		= 1,	/* Cycle counter */
	ARMV7_COUNTER0			= 2,	/* First event counter */
};

/*
 * The cycle counter is ARMV7_CYCLE_COUNTER.
 * The first event counter is ARMV7_COUNTER0.
 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 */
#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
/*
 * ARMv7 low level PMNC access
 */

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV7_PMNC_N_MASK	0x1f
#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * Available counters
 */
#define ARMV7_CNT0		0	/* First event counter */
#define ARMV7_CCNT		31	/* Cycle counter */

/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)

/*
 * CNTENS: counters enable reg
 */
#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)

/*
 * CNTENC: counters disable reg
 */
#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)

/*
 * INTENS: counters overflow interrupt enable reg
 */
#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)

/*
 * INTENC: counters overflow interrupt disable reg
 */
#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)

/*
 * EVTSEL: Event selection reg
 */
#define	ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */

/*
 * SELECT: Counter selection reg
 */
#define	ARMV7_SELECT_MASK	0x1f		/* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
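/*
 * Illustrative only (not part of the original source): ARMV7_EVENT_CNT_TO_CNTx
 * is ARMV7_COUNTER0 - ARMV7_CNT0 == 2, so a perf event index of 2 selects
 * hardware counter CNT0 and, for example, ARMV7_CNTENS_P(2) evaluates to
 * (1 << 0), while the cycle counter always uses the dedicated bit 31.
 */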
static inline unsigned long armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(unsigned long val)
{
	val &= ARMV7_PMNC_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}
static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
						    enum armv7_counters counter)
{
	int ret = 0;

	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
		ret = pmnc & ARMV7_FLAG_P(counter);
	else
		pr_err("CPU%u checking wrong counter %d overflow status\n",
		       smp_processor_id(), counter);

	return ret;
}
static inline int armv7_pmnc_select_counter(unsigned int idx)
{
	u32 val;

	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
		pr_err("CPU%u selecting wrong PMNC counter"
		       " %d\n", smp_processor_id(), idx);
		return -1;
	}

	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));

	return idx;
}
static inline u32 armv7pmu_read_counter(int idx)
{
	unsigned long value = 0;

	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mrc p15, 0, %0, c9, c13, 2"
				     : "=r" (value));
	} else
		pr_err("CPU%u reading wrong counter %d\n",
		       smp_processor_id(), idx);

	return value;
}
static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mcr p15, 0, %0, c9, c13, 2"
				     : : "r" (value));
	} else
		pr_err("CPU%u writing wrong counter %d\n",
		       smp_processor_id(), idx);
}
static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTSEL_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}
static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
		       " %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENS_C;
	else
		val = ARMV7_CNTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
		       " %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENC_C;
	else
		val = ARMV7_CNTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
		       " interrupt enable %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENS_C;
	else
		val = ARMV7_INTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
		       " interrupt enable %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENC_C;
	else
		val = ARMV7_INTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));

	return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
		       cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
		       cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
	}
}
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't need to set the event if it's a cycle count
	 */
	if (idx != ARMV7_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Disable counter and interrupt
	 */
	spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static void armv7pmu_start(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;

	/* Always place a cycle counter into the cycle counter. */
	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * the event counters
		 */
		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
			if (!test_and_set_bit(idx, cpuc->used_mask))
				return idx;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
static struct arm_pmu armv7pmu = {
	.handle_irq		= armv7pmu_handle_irq,
	.enable			= armv7pmu_enable_event,
	.disable		= armv7pmu_disable_event,
	.read_counter		= armv7pmu_read_counter,
	.write_counter		= armv7pmu_write_counter,
	.get_event_idx		= armv7pmu_get_event_idx,
	.start			= armv7pmu_start,
	.stop			= armv7pmu_stop,
	.raw_event_mask		= 0xFF,
	.max_period		= (1LLU << 32) - 1,
};
static u32 __init armv7_reset_read_pmnc(void)
{
	u32 nb_cnt;

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
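/*
 * Illustrative note (not from the original source): on a Cortex-A8 the
 * PMNC N field reads back 4, so armv7_reset_read_pmnc() reports 5 events
 * in total - the 4 programmable counters plus the dedicated cycle counter.
 */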
const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
	armv7pmu.name		= "ARMv7 Cortex-A8";
	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
	armv7pmu.event_map	= &armv7_a8_perf_map;
	armv7pmu.num_events	= armv7_reset_read_pmnc();
	return &armv7pmu;
}

const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
	armv7pmu.name		= "ARMv7 Cortex-A9";
	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
	armv7pmu.event_map	= &armv7_a9_perf_map;
	armv7pmu.num_events	= armv7_reset_read_pmnc();
	return &armv7pmu;
}
/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Based on xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 *	- xscale1pmu: 2 event counters and a cycle counter
 *	- xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */

enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};

enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 1,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
};

static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2224 [C(RESULT_ACCESS
)] = XSCALE_PERFCTR_DCACHE_ACCESS
,
2225 [C(RESULT_MISS
)] = XSCALE_PERFCTR_DCACHE_MISS
,
2228 [C(RESULT_ACCESS
)] = XSCALE_PERFCTR_DCACHE_ACCESS
,
2229 [C(RESULT_MISS
)] = XSCALE_PERFCTR_DCACHE_MISS
,
2231 [C(OP_PREFETCH
)] = {
2232 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2233 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
2238 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2239 [C(RESULT_MISS
)] = XSCALE_PERFCTR_ICACHE_MISS
,
2242 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2243 [C(RESULT_MISS
)] = XSCALE_PERFCTR_ICACHE_MISS
,
2245 [C(OP_PREFETCH
)] = {
2246 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2247 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
2252 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2253 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
2256 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2257 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
2259 [C(OP_PREFETCH
)] = {
2260 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2261 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
2266 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2267 [C(RESULT_MISS
)] = XSCALE_PERFCTR_DTLB_MISS
,
2270 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2271 [C(RESULT_MISS
)] = XSCALE_PERFCTR_DTLB_MISS
,
2273 [C(OP_PREFETCH
)] = {
2274 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2275 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
2280 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2281 [C(RESULT_MISS
)] = XSCALE_PERFCTR_ITLB_MISS
,
2284 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2285 [C(RESULT_MISS
)] = XSCALE_PERFCTR_ITLB_MISS
,
2287 [C(OP_PREFETCH
)] = {
2288 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2289 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
2294 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2295 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
2298 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2299 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
2301 [C(OP_PREFETCH
)] = {
2302 [C(RESULT_ACCESS
)] = CACHE_OP_UNSUPPORTED
,
2303 [C(RESULT_MISS
)] = CACHE_OP_UNSUPPORTED
,
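/*
 * Entries marked CACHE_OP_UNSUPPORTED cause the corresponding generic cache
 * event to be rejected when the event is created; everything else maps a
 * (cache, operation, result) triple straight onto one of the raw XScale
 * event codes above, e.g. an L1D read miss becomes XSCALE_PERFCTR_DCACHE_MISS.
 */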
#define XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define XSCALE_CCNT_RESET	0x004
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008

#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
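/*
 * On xscale1 everything lives in the single PMNC register: bit 0 enables the
 * PMU, bits 4-6 are the per-counter interrupt enables, bits 8-10 are the
 * overflow flags (cleared by writing the register back, as the interrupt
 * handler below does), and the event numbers for the two event counters sit
 * in the 8-bit fields at bits 12-19 and 20-27, per the masks and shifts above.
 */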
static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* upper 4bits and 7, 11 are write-as-0 */
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}
static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}
static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: there's an A stepping erratum that states if an overflow
	 *       bit already exists and another occurs, the previous
	 *       Overflow bit gets cleared. There's no workaround.
	 *       Fixed in B stepping or later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
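/*
 * Overflow handling above follows the common armpmu pattern: stop the PMU,
 * bail out if none of our counters actually overflowed, then for each active
 * counter that did overflow fold the hardware delta into the event count,
 * re-arm the sampling period and, if perf_event_overflow() asks us to
 * throttle, disable that counter before the PMU is switched back on.
 */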
static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
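/*
 * Disabling a counter clears its interrupt enable and reprograms its
 * event-select field to XSCALE_PERFCTR_UNUSED, parking the counter on an
 * event number the hardware does not implement rather than the one we have
 * just released.
 */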
static int
xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
			 struct hw_perf_event *event)
{
	if (XSCALE_PERFCTR_CCNT == event->config_base) {
		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return XSCALE_CYCLE_COUNTER;
	} else {
		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) {
			return XSCALE_COUNTER1;
		}

		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) {
			return XSCALE_COUNTER0;
		}

		return -EAGAIN;
	}
}
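/*
 * Counter allocation: the cycle counter is reserved for the pseudo event
 * XSCALE_PERFCTR_CCNT, and general events are handed counter 1 before
 * counter 0. used_mask is the per-CPU bitmap from struct cpu_hw_events, so
 * test_and_set_bit() both checks for and claims a free counter in one step.
 */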
static void
xscale1pmu_start(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale1pmu_stop(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
xscale1pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void
xscale1pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}
static const struct arm_pmu xscale1pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE1,
	.name		= "xscale1",
	.handle_irq	= xscale1pmu_handle_irq,
	.enable		= xscale1pmu_enable_event,
	.disable	= xscale1pmu_disable_event,
	.read_counter	= xscale1pmu_read_counter,
	.write_counter	= xscale1pmu_write_counter,
	.get_event_idx	= xscale1pmu_get_event_idx,
	.start		= xscale1pmu_start,
	.stop		= xscale1pmu_stop,
	.cache_map	= &xscale_perf_cache_map,
	.event_map	= &xscale_perf_map,
	.raw_event_mask	= 0xFF,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
};

const struct arm_pmu *__init
xscale1pmu_init(void)
{
	return &xscale1pmu;
}
#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
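/*
 * Unlike xscale1, the second-generation PMU splits its control state across
 * several registers: PMNC carries only the enable and CNT64 bits, while the
 * overflow flags, interrupt enables and per-counter event selects each have
 * a dedicated register, read and written by the helpers below. The masks
 * above therefore index those dedicated registers, not PMNC.
 */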
static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_int_enable(u32 val)
{
	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}
static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}
static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	spin_lock_irqsave(&pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
			 struct hw_perf_event *event)
{
	int idx = xscale1pmu_get_event_idx(cpuc, event);
	if (idx >= 0)
		goto out;

	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
		idx = XSCALE_COUNTER3;
	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
		idx = XSCALE_COUNTER2;
out:
	return idx;
}
static void
xscale2pmu_start(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale2pmu_stop(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
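/*
 * xscale2pmu_start() clears XSCALE_PMU_CNT64 as well as setting the enable
 * bit; CNT64 appears to select the cycle counter's divide-by-64 mode, and
 * perf wants CCNT incrementing on every cycle so that period arithmetic
 * stays consistent with the event counters.
 */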
static inline u32
xscale2pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void
xscale2pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}
static const struct arm_pmu xscale2pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE2,
	.name		= "xscale2",
	.handle_irq	= xscale2pmu_handle_irq,
	.enable		= xscale2pmu_enable_event,
	.disable	= xscale2pmu_disable_event,
	.read_counter	= xscale2pmu_read_counter,
	.write_counter	= xscale2pmu_write_counter,
	.get_event_idx	= xscale2pmu_get_event_idx,
	.start		= xscale2pmu_start,
	.stop		= xscale2pmu_stop,
	.cache_map	= &xscale_perf_cache_map,
	.event_map	= &xscale_perf_map,
	.raw_event_mask	= 0xFF,
	.num_events	= 5,
	.max_period	= (1LLU << 32) - 1,
};

const struct arm_pmu *__init
xscale2pmu_init(void)
{
	return &xscale2pmu;
}
static int __init
init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);

	/* ARM Ltd CPUs. */
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			armpmu = armv6pmu_init();
			break;
		case 0xB020:	/* ARM11mpcore */
			armpmu = armv6mpcore_pmu_init();
			break;
		case 0xC080:	/* Cortex-A8 */
			armpmu = armv7_a8_pmu_init();
			break;
		case 0xC090:	/* Cortex-A9 */
			armpmu = armv7_a9_pmu_init();
			break;
		}
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
			armpmu = xscale1pmu_init();
			break;
		case 2:
			armpmu = xscale2pmu_init();
			break;
		}
	}

	if (armpmu)
		pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
	else
		pr_info("no hardware support available\n");

	perf_pmu_register(&pmu);

	return 0;
}
arch_initcall(init_hw_perf_events);
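/*
 * For reference, a rough and illustrative (untested) sketch of how a
 * userspace tool would exercise one of these counters via the perf syscall;
 * variable names and error handling are elided:
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_HARDWARE,
 *		.size   = sizeof(attr),
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	read(fd, &count, sizeof(count));	// count is a u64
 *
 * The .config value is resolved through the event map of whichever PMU was
 * detected above, so PERF_COUNT_HW_CPU_CYCLES lands on the cycle counter on
 * every supported core.
 */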
/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));
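/*
 * frame_tail mirrors the tail of an APCS-style stack frame as typically laid
 * down by a frame-pointer prologue: the saved fp, sp and lr words that sit
 * immediately below the address held in the frame pointer register, which is
 * why a frame is addressed as ((struct frame_tail *)fp) - 1.
 */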
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail *
user_backtrace(struct frame_tail *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail *tail;

	tail = (struct frame_tail *)regs->ARM_fp - 1;

	while (tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}
void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}
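/*
 * Kernel-side callchains rely on walk_stackframe(), so accurate traces need
 * the kernel built with frame pointers (or working unwind tables) for the
 * walker to make progress; callchain_trace() simply records the PC of each
 * frame it is handed.
 */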