/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a single 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
#define MAX_HWEVENTS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
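/* The two 32-bit counters live in the halves of the single 64-bit
 * %pic register described above: index 0 selects the upper half,
 * index 1 the lower half.
 */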
struct cpu_hw_events {
	struct perf_event	*events[MAX_HWEVENTS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	u64			pcr;	/* soft copy of the %pcr register */
	int			enabled;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}
static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
{
	*msk = val & 0xff;
	*enc = val >> 16;
}
#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff
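/* Sentinel encodings used in the cache maps below:
 * CACHE_OP_UNSUPPORTED marks an operation the chip simply cannot
 * count, while CACHE_OP_NONSENSE marks a combination that has no
 * meaning at all (e.g. "write" accesses to the instruction cache).
 */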
typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
};
static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};
static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}
static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};
static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
};
/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};
static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}
static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};
static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
};
static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}
static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};
static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
};
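/* Selected at boot by supported_pmu() based upon the machine's
 * sparc_pmu_type string; remains NULL if no usable PMU is found.
 */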
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}
static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}
static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc,
					  struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = cpuc->pcr;
	val &= ~mask;
	val |= hwc->config;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc,
					   struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val = cpuc->pcr;

	val &= ~mask;
	val |= nop;
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;
	int i;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	val = cpuc->pcr;

	for (i = 0; i < MAX_HWEVENTS; i++) {
		struct perf_event *cp = cpuc->events[i];
		struct hw_perf_event *hwc;

		if (!cp)
			continue;
		hwc = &cp->hw;
		val |= hwc->config_base;
	}

	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;

	val = cpuc->pcr;
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	cpuc->pcr = val;

	pcr_ops->write(cpuc->pcr);
}
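/* The 64-bit %pic register holds both 32-bit counters; read_pmc() and
 * write_pmc() below extract and update just the half selected by idx,
 * leaving the other counter's value untouched.
 */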
static u32 read_pmc(int idx)
{
	u64 val;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}
static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}
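/* The counters count up and interrupt when they wrap from 0xffffffff
 * to 0, so to take a sample after "left" more events we program the
 * counter with the 32-bit two's complement of the period.  For
 * example, a sampling period of 0x1000 events is programmed as
 * 0xfffff000, which overflows after exactly 0x1000 increments.
 */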
static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	atomic64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
static int sparc_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (test_and_set_bit(idx, cpuc->used_mask))
		return -EAGAIN;

	sparc_pmu_disable_event(cpuc, hwc, idx);

	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	sparc_perf_event_set_period(event, hwc, idx);
	sparc_pmu_enable_event(cpuc, hwc, idx);
	perf_event_update_userpage(event);
	return 0;
}
static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
static void sparc_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	clear_bit(idx, cpuc->active_mask);
	sparc_pmu_disable_event(cpuc, hwc, idx);

	barrier();

	sparc_perf_event_update(event, hwc, idx);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}
static void sparc_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, hwc->idx);
}
static void sparc_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	sparc_pmu_enable_event(cpuc, hwc, hwc->idx);
}
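/* The performance counters are also used by the NMI watchdog, so
 * access is refcounted: the first perf user on the system stops the
 * watchdog, and the last one to go away restarts it.
 */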
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	stop_nmi_watchdog(NULL);
	cpuc->pcr = pcr_ops->read();
}
void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}
void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}
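/* A PERF_TYPE_HW_CACHE config value packs three byte-wide fields,
 * decoded below:
 *
 *	config = (cache_type << 0) | (cache_op << 8) | (cache_result << 16)
 *
 * e.g. an L1D read-miss event would be encoded as
 * C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16).
 */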
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}
/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 */
static int sparc_check_constraints(unsigned long *events, int n_ev)
{
	if (n_ev <= perf_max_events) {
		u8 msk1, msk2;
		u16 dummy;

		if (n_ev == 1)
			return 0;
		BUG_ON(n_ev != 2);
		perf_event_decode(events[0], &dummy, &msk1);
		perf_event_decode(events[1], &dummy, &msk2);

		/* If both events can go on any counter, OK. */
		if (msk1 == (PIC_UPPER | PIC_LOWER) &&
		    msk2 == (PIC_UPPER | PIC_LOWER))
			return 0;

		/* If one event is limited to a specific counter,
		 * and the other can go on both, OK.
		 */
		if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
		    msk2 == (PIC_UPPER | PIC_LOWER))
			return 0;
		if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
		    msk1 == (PIC_UPPER | PIC_LOWER))
			return 0;

		/* If the events are fixed to different counters, OK. */
		if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
		    (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
			return 0;

		/* Otherwise, there is a conflict. */
	}

	return -1;
}
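/* A single %pcr controls both counters, so the user/kernel/hv
 * sampling enable bits are shared by every event on this PMU.  All
 * events scheduled together must therefore agree on their exclude_*
 * settings, which is what this check enforces.
 */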
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n++] = group->hw.event_base;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n++] = event->hw.event_base;
		}
	}
	return n;
}
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	const struct perf_event_map *pmap;
	u64 enc;
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
	} else
		return -EOPNOTSUPP;

	/* We save the enable bits in the config_base.  So to
	 * turn off sampling just write 'config', and to enable
	 * things write 'config | config_base'.
	 */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	hwc->event_base = perf_event_encode(pmap);

	enc = pmap->encoding;

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   perf_max_events - 1,
				   evts, events);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = hwc->event_base;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	if (sparc_check_constraints(events, n + 1))
		return -EINVAL;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	if (pmap->pic_mask & PIC_UPPER) {
		hwc->idx = PIC_UPPER_INDEX;
		enc <<= sparc_pmu->upper_shift;
	} else {
		hwc->idx = PIC_LOWER_INDEX;
		enc <<= sparc_pmu->lower_shift;
	}

	hwc->config |= enc;
	return 0;
}
static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
};
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err = __hw_perf_event_init(event);

	if (err)
		return ERR_PTR(err);
	return &pmu;
}
void perf_event_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}
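/* Overflow handling: a counter that just overflowed has wrapped back
 * to a small value, so after sparc_perf_event_update() a set bit 31
 * means that counter was nowhere near wrapping and can be skipped.
 * On chips with the TOE interrupt-enable bits, a dummy %pcr write is
 * also needed first, to ack the overflow condition itself.
 */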
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit)
		pcr_ops->write(cpuc->pcr);

	for (idx = 0; idx < MAX_HWEVENTS; idx++) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = event->hw.last_period;
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			sparc_pmu_disable_event(cpuc, hwc, idx);
	}

	return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
};
static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}
void __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 events.  But this simple
	 * driver only supports one active event at a time.
	 */
	perf_max_events = 1;

	register_die_notifier(&perf_event_nmi_notifier);
}
static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
static void perf_callchain_kernel(struct pt_regs *regs,
				  struct perf_callchain_entry *entry)
{
	unsigned long ksp, fp;

	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->tpc);

	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
static void perf_callchain_user_64(struct pt_regs *regs,
				   struct perf_callchain_entry *entry)
{
	unsigned long ufp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
	do {
		struct sparc_stackf *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
static void perf_callchain_user_32(struct pt_regs *regs,
				   struct perf_callchain_entry *entry)
{
	unsigned long ufp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6];
	do {
		struct sparc_stackf32 *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf32 *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp;
		callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
/* Like powerpc we can't get PMU interrupts within the PMU handler,
 * so no need for separate NMI and IRQ chains as on x86.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);

	entry->nr = 0;
	if (!user_mode(regs)) {
		stack_trace_flush();
		perf_callchain_kernel(regs, entry);
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}
	if (regs) {
		flush_user_windows();
		if (test_thread_flag(TIF_32BIT))
			perf_callchain_user_32(regs, entry);
		else
			perf_callchain_user_64(regs, entry);
	}
	return entry;
}