/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>

struct cpu_hw_counters {
	int n_counters;
	int n_percpu;
	int disabled;
	int n_added;
	struct perf_counter *counter[MAX_HWCOUNTERS];
	unsigned int events[MAX_HWCOUNTERS];
	u64 mmcr[3];
	int pmcs_enabled;
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;
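
/*
 * Note: FCS only freezes counting in supervisor state, so it is
 * ineffective when the kernel itself runs in hypervisor state
 * (MSR.HV = 1); init_perf_counters() below switches this variable
 * to MMCR0_FCHV on such systems.
 */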

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(unsigned int event[], int n_ev)
{
	u64 mask, value, nv;
	unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
	int i, j;
	u64 addf = ppmu->add_fields;
	u64 tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		alternatives[i][0] = event[i];
		if (ppmu->get_constraint(event[i], &amasks[i][0],
					 &avalues[i][0]))
			return -1;
		choice[i] = 0;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		n_alt[i] = ppmu->get_alternatives(event[i], alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(alternatives[i][j],
					     &amasks[i][j], &avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (1) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | avalues[i][j]) +
				(value & avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ avalues[i][j])
			     & amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event i,
			 * remember where we got up to with this event,
			 * go on to the next event, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= amasks[i][j];
			if (++i == n_ev)
				break;	/* all OK */
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event[i] = alternatives[i][choice[i]];
	return 0;
}
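
/*
 * Worked example of the constraint arithmetic (illustrative encoding,
 * not any real PMU's): suppose a 4-bit field of the constraint value
 * holds the code an event needs on a shared event-select bus, with the
 * same 4 bits set in the mask.  Two events asking for code 3 OR
 * together without changing any bits under the accumulated mask, so
 * they are compatible; an event asking for code 5 flips masked bits
 * and the ((nv + tadd) ^ value) & mask test rejects it.  Counting
 * limits ("at most N events of class X") use add_fields/test_adder
 * instead: add_fields gives each event a 1 in the low bit of a counter
 * field, and test_adder biases that field so that exceeding N carries
 * into a masked bit, which again trips the test.
 */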

/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new)
{
	int eu, ek, eh;
	int i, n;
	struct perf_counter *counter;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	eu = ctrs[0]->hw_event.exclude_user;
	ek = ctrs[0]->hw_event.exclude_kernel;
	eh = ctrs[0]->hw_event.exclude_hv;
	if (n_prev == 0)
		n_prev = 1;
	for (i = n_prev; i < n; ++i) {
		counter = ctrs[i];
		if (counter->hw_event.exclude_user != eu ||
		    counter->hw_event.exclude_kernel != ek ||
		    counter->hw_event.exclude_hv != eh)
			return -EAGAIN;
	}
	return 0;
}
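
/*
 * Rationale: the exclude_* settings must agree across every counter on
 * the PMU because they are implemented with the global MMCR0 freeze
 * bits (FCP, FCS/FCHV), which freeze all PMCs at once rather than an
 * individual counter.
 */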

static void power_perf_read(struct perf_counter *counter)
{
	long val, delta, prev;

	if (!counter->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = atomic64_read(&counter->hw.prev_count);
		barrier();
		val = read_pmc(counter->hw.idx);
	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &counter->hw.period_left);
}
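
/*
 * Example of the 32-bit wrap handling above (illustrative numbers):
 * if prev = 0xfffffff0 and the PMC has since wrapped around to
 * val = 0x10, then (val - prev) & 0xfffffffful = 0x20, i.e. 32 events
 * elapsed even though val < prev.  Masking to 32 bits is what makes
 * the subtraction wrap-safe; without it the delta would be negative.
 */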

/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
u64 hw_perf_save_disable(void)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long ret;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);

	ret = cpuhw->disabled;
	if (!ret) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			if (ppc_md.enable_pmcs)
				ppc_md.enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the counters
		 * before we return.
		 */
		mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
	return ret;
}
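
/*
 * Usage sketch (this mirrors what power_perf_enable() and
 * power_perf_disable() below actually do): callers freeze the PMU,
 * reconfigure the per-cpu state, then restore, passing back the value
 * this function returned so that nested disables stay disabled:
 *
 *	local_irq_save(flags);
 *	pmudis = hw_perf_save_disable();
 *	... add or remove counters in cpu_hw_counters ...
 *	hw_perf_restore(pmudis);
 *	local_irq_restore(flags);
 */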

/*
 * Re-enable all counters if disable == 0.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_restore(u64 disable)
{
	struct perf_counter *counter;
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWCOUNTERS];

	if (disable)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed counters,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of counters).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
		if (cpuhw->n_counters == 0)
			get_lppaca()->pmcregs_in_use = 0;
		goto out;
	}

	/*
	 * Compute MMCR* values for the new set of counters
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * hw_event.exclude_* bits for the first counter.
	 * We have already checked that all counters have the
	 * same values for these bits as the first counter.
	 */
	counter = cpuhw->counter[0];
	if (counter->hw_event.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (counter->hw_event.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_counters_kernel;
	if (counter->hw_event.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware counters to their initial values.
	 * Then unfreeze the counters.
	 */
	get_lppaca()->pmcregs_in_use = 1;
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing counters that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
			power_perf_read(counter);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved counters.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx)
			continue;
		val = 0;
		if (counter->hw_event.irq_period) {
			left = atomic64_read(&counter->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&counter->hw.prev_count, val);
		counter->hw.idx = hwc_index[i] + 1;
		write_pmc(counter->hw.idx, val);
		perf_counter_update_userpage(counter);
	}
	mb();
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
	mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);

 out:
	local_irq_restore(flags);
}
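
/*
 * Example of the initial-value computation above (illustrative
 * numbers): for a sampling counter with irq_period = 1000, left is
 * 1000, so the PMC is started at 0x80000000 - 1000 = 0x7ffffc18.
 * After exactly 1000 events the counter reaches 0x80000000, its most
 * significant bit sets, and the PMU raises a performance monitor
 * interrupt; that is how the requested sampling period is enforced.
 */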

static int collect_events(struct perf_counter *group, int max_count,
			  struct perf_counter *ctrs[], unsigned int *events)
{
	int n = 0;
	struct perf_counter *counter;

	if (!is_software_counter(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(counter, &group->sibling_list, list_entry) {
		if (!is_software_counter(counter) &&
		    counter->state != PERF_COUNTER_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = counter;
			events[n++] = counter->hw.config;
		}
	}
	return n;
}
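
/*
 * Example (hypothetical group): for a leader counting a hardware event
 * with two siblings, one a software counter and one a hardware counter
 * in the OFF state, collect_events() returns n = 1 -- only the leader
 * occupies a PMC slot; software and OFF counters are skipped.  A
 * return of -1 means the group needs more PMCs than max_count allows.
 */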

static void counter_sched_in(struct perf_counter *counter, int cpu)
{
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;
	counter->tstamp_running += counter->ctx->time_now -
		counter->tstamp_stopped;
	if (is_software_counter(counter))
		counter->hw_ops->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	struct cpu_hw_counters *cpuhw;
	long i, n, n0;
	struct perf_counter *sub;

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	n = collect_events(group_leader, ppmu->n_counter - n0,
			   &cpuhw->counter[n0], &cpuhw->events[n0]);
	if (n < 0)
		return -EAGAIN;
	if (check_excludes(cpuhw->counter, n0, n))
		return -EAGAIN;
	if (power_check_constraints(cpuhw->events, n + n0))
		return -EAGAIN;
	cpuhw->n_counters = n0 + n;
	cpuhw->n_added += n;

	/*
	 * OK, this group can go on; update counter states etc.,
	 * and enable any software counters
	 */
	for (i = n0; i < n0 + n; ++i)
		cpuhw->counter[i]->hw.config = cpuhw->events[i];
	cpuctx->active_oncpu += n;
	n = 1;
	counter_sched_in(group_leader, cpu);
	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
		if (sub->state != PERF_COUNTER_STATE_OFF) {
			counter_sched_in(sub, cpu);
			++n;
		}
	}
	ctx->nr_active += n;

	return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_restore to do the
 * actual work of reconfiguring the PMU.
 */
static int power_perf_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	u64 pmudis;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	pmudis = hw_perf_save_disable();

	/*
	 * Add the counter to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->counter[n0] = counter;
	cpuhw->events[n0] = counter->hw.config;
	if (check_excludes(cpuhw->counter, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw->events, n0 + 1))
		goto out;

	counter->hw.config = cpuhw->events[n0];
	++cpuhw->n_counters;
	++cpuhw->n_added;

	ret = 0;
 out:
	hw_perf_restore(pmudis);
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_perf_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	long i;
	u64 pmudis;
	unsigned long flags;

	local_irq_save(flags);
	pmudis = hw_perf_save_disable();

	power_perf_read(counter);

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	for (i = 0; i < cpuhw->n_counters; ++i) {
		if (counter == cpuhw->counter[i]) {
			while (++i < cpuhw->n_counters)
				cpuhw->counter[i-1] = cpuhw->counter[i];
			--cpuhw->n_counters;
			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
			perf_counter_update_userpage(counter);
			break;
		}
	}
	if (cpuhw->n_counters == 0) {
		/* disable exceptions if no counters are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	hw_perf_restore(pmudis);
	local_irq_restore(flags);
}

struct hw_perf_counter_ops power_perf_ops = {
	.enable = power_perf_enable,
	.disable = power_perf_disable,
	.read = power_perf_read
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	unsigned long ev;
	struct perf_counter *ctrs[MAX_HWCOUNTERS];
	unsigned int events[MAX_HWCOUNTERS];
	int n;

	if (!ppmu)
		return NULL;
	if ((s64)counter->hw_event.irq_period < 0)
		return NULL;
	if (!perf_event_raw(&counter->hw_event)) {
		ev = perf_event_id(&counter->hw_event);
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return NULL;
		ev = ppmu->generic_events[ev];
	} else {
		ev = perf_event_config(&counter->hw_event);
	}
	counter->hw.config_base = ev;
	counter->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		counter->hw_event.exclude_hv = 0;

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware counters in the group.  We assume the counter
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (counter->group_leader != counter) {
		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
				   ctrs, events);
		if (n < 0)
			return NULL;
	}
	events[n] = ev;
	ctrs[n] = counter;
	if (check_excludes(ctrs, n, 1))
		return NULL;
	if (power_check_constraints(events, n + 1))
		return NULL;

	counter->hw.config = events[n];
	atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
	return &power_perf_ops;
}
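
/*
 * Note on the generic event mapping in hw_perf_counter_init(): perf's
 * portable event ids are indexes into ppmu->generic_events[], which
 * each per-processor PMU description fills with its own raw event
 * code; a zero entry means "no equivalent event on this processor"
 * and counter creation fails.  Raw events bypass the table entirely.
 */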

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, long val,
			       struct pt_regs *regs)
{
	s64 prev, delta, left;
	int record = 0;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&counter->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);

	/*
	 * See if the total period for this counter has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&counter->hw.period_left) - delta;
	if (counter->hw_event.irq_period) {
		if (left <= 0) {
			left += counter->hw_event.irq_period;
			if (left <= 0)
				left = counter->hw_event.irq_period;
			record = 1;
		}
		if (left < 0x80000000L)
			val = 0x80000000L - left;
	}
	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);

	/*
	 * Finally record data if requested.
	 */
	if (record)
		perf_counter_output(counter, 1, regs);
}

/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;
	long val;
	int found = 0;

	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		val = read_pmc(counter->hw.idx);
		if ((int)val < 0) {
			/* counter has overflowed */
			found = 1;
			record_and_restart(counter, val, regs);
		}
	}

	/*
	 * In case we didn't find and reset the counter that caused
	 * the interrupt, scan all counters and reset any that are
	 * negative, to avoid getting continual interrupts.
	 * Any that we processed in the previous loop will not be negative.
	 */
	if (!found) {
		for (i = 0; i < ppmu->n_counter; ++i) {
			val = read_pmc(i + 1);
			if ((int)val < 0)
				write_pmc(i + 1, 0);
		}
	}

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the counters frozen until
	 * we get back out of this interrupt.
	 */
	mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);

	/*
	 * If we need a wakeup, check whether interrupts were soft-enabled
	 * when we took the interrupt.  If they were, we can wake stuff up
	 * immediately; otherwise we'll have to do the wakeup when interrupts
	 * get soft-enabled.
	 */
	if (test_perf_counter_pending() && regs->softe) {
		irq_enter();
		clear_perf_counter_pending();
		perf_counter_do_pending();
		irq_exit();
	}
}
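
/*
 * Example of the overflow test above (illustrative value): the PMCs
 * are 32-bit registers, so a read of 0x80000005 becomes a negative
 * int and (int)val < 0 is true -- the counter passed 0x80000000 and
 * is 5 events past the overflow point.  Combined with starting a
 * sampling counter at 0x80000000 - period, the sign bit alone tells
 * the handler which PMC raised the interrupt.
 */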

void hw_perf_counter_setup(int cpu)
{
	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

extern struct power_pmu power4_pmu;
extern struct power_pmu ppc970_pmu;
extern struct power_pmu power5_pmu;
extern struct power_pmu power5p_pmu;
extern struct power_pmu power6_pmu;

static int init_perf_counters(void)
{
	unsigned long pvr;

	if (reserve_pmc_hardware(perf_counter_interrupt)) {
		printk(KERN_ERR "Couldn't init performance monitor subsystem\n");
		return -EBUSY;
	}

	/* XXX should get this from cputable */
	pvr = mfspr(SPRN_PVR);
	switch (PVR_VER(pvr)) {
	case PV_POWER4:
	case PV_POWER4p:
		ppmu = &power4_pmu;
		break;
	case PV_970:
	case PV_970FX:
	case PV_970MP:
		ppmu = &ppc970_pmu;
		break;
	case PV_POWER5:
		ppmu = &power5_pmu;
		break;
	case PV_POWER5p:
		ppmu = &power5p_pmu;
		break;
	case 0x3e:
		ppmu = &power6_pmu;
		break;
	}

	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_counters_kernel = MMCR0_FCHV;

	return 0;
}

arch_initcall(init_perf_counters);