/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. The callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter. MIPS CPUs vary in their performance counters; some
	 * use this differently, and some may not use it at all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers; CNTR_ODD, odd numbers.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
	#define CNTR_ALL	0xffffffff
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

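/*
 * For illustration only (not used by the code): an entry such as
 * { 0x02, CNTR_EVEN, T } in the event maps below describes hardware
 * event number 0x02, countable on even-numbered counters only, with
 * the per-TC (T) range when CONFIG_MIPS_MT_SMP is enabled.
 */
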
#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;

#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1 << 0)
#define M_PERFCTL_KERNEL		(1 << 1)
#define M_PERFCTL_SUPERVISOR		(1 << 2)
#define M_PERFCTL_USER			(1 << 3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1 << 4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define    M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define    M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define    M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid) << 22)
#define M_PERFCTL_WIDE			(1 << 30)
#define M_PERFCTL_MORE			(1 << 31)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
					M_PERFCTL_KERNEL |		\
					M_PERFCTL_USER |		\
					M_PERFCTL_SUPERVISOR |		\
					M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0

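/*
 * Illustration (not used by the driver): counting event 0x27 in user
 * mode with the overflow interrupt enabled corresponds to a control
 * word of
 *
 *	M_PERFCTL_EVENT(0x27) | M_PERFCTL_USER | M_PERFCTL_INTERRUPT_ENABLE
 *	  == (0x27 << 5) | (1 << 3) | (1 << 4) == 0x4f8
 */
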
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : smp_processor_id())

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

#define vpe_id()	0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

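/*
 * Example: on a dual-VPE core with 4 counters in total and no per-TC
 * counters, counters_total_to_per_cpu(4) yields 2 counters per VPE.
 */
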
static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind takes the
		 * counter an event of the latter kind wants to use, the
		 * "counter allocation" for the latter event will fail.
		 * If the two could be swapped dynamically, both would be
		 * satisfied, but we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure the interrupt is enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}

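/*
 * Example of the arithmetic above (32-bit counters, so mipspmu.overflow
 * is 1 << 31): with 1000 events left in the sample period, the counter
 * is preloaded with 0x80000000 - 1000, so the overflow bit is reached
 * after exactly 1000 more events and the counter interrupt fires.
 */
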
static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers can
 * not be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross-CPU calls. on_each_cpu() can help us, but we
 * can not make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If any counter interrupt is raised while
 * we own the write lock, simply pause local counters on that CPU and
 * spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_lock(&pmuint_rwlock);
#endif
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
			IRQF_PERCPU | IRQF_NOBALANCING,
			"mips_perf_pmu", NULL);
		if (err) {
			pr_warning("Unable to request IRQ%d for MIPS "
				   "performance counters!\n", mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its "
			"interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters, so they
 * have specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= nr_cpumask_bits ||
	    (event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}

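/*
 * For illustration: without CONFIG_MIPS_MT_SMP, the 74K branch event
 * { 0x27, CNTR_EVEN, T } encodes to 0x555527 (CNTR_EVEN & 0xffff00 is
 * 0x555500). mipsxx_pmu_alloc_counter() later recovers the counter mask
 * from bits 8..23 of hwc->event_base and mipsxx_pmu_enable_event() the
 * event number from bits 0..7.
 */
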
static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}

static int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	default:
		counters = __n_counters();
	}

	return counters;
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
		/* fall through: also reset the lower-numbered counters */
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
		/* fall through */
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
		/* fall through */
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}

/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* The 74K core has a different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

/* The 74K core has a completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; the same event is used
	 * for read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Return the MIPS event descriptor for this generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period = mipspmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not an NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/*
 * The user can use raw events 0-255, where 0-127 select events on even
 * counters and 128-255 select events on odd counters; bit 7 indicates
 * the parity. For example, to request Event Num 15 (from the user
 * manual) on an odd counter, 128 is added to 15, i.e. 143 (0x8F) is
 * used as the event config.
 */
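/*
 * Illustration only (user-space side, not part of this file): with the
 * perf tool's raw-event syntax the same request would read
 *
 *	perf stat -e r8f ...
 */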
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.event_id = base_id;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	}

	return &raw_event;
}

static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	switch (base_id) {
	case 0x00:
	case 0x0f:
	case 0x1e:
	case 0x1f:
	case 0x2f:
	case 0x34:
	case 0x3b ... 0x3f:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}

static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		/*
		 * Using platform specific interrupt controller defines.
		 */
		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else {
#endif
		if ((cp0_perfcount_irq >= 0) &&
				(cp0_compare_irq != cp0_perfcount_irq))
			irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
		else
			irq = -1;
#ifdef MSC01E_INT_BASE
	}
#endif

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxx74Kcore_event_map;
		mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON1:
		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);
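
/*
 * Once the PMU is registered, the counters are driven through the
 * generic perf interfaces; e.g. (illustrative only):
 *
 *	perf stat -e cycles,instructions,branch-misses ./a.out
 */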