/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>

struct cpu_hw_counters {
	int n_counters;
	int disabled;
	int n_added;
	struct perf_counter *counter[MAX_HWCOUNTERS];
	unsigned int events[MAX_HWCOUNTERS];
	u64 mmcr[3];
	int pmcs_enabled;
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

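/*
 * Note that hardware counters are referred to by a 1-based index
 * throughout this file: idx 1..8 map to SPRN_PMC1..SPRN_PMC8, so a
 * counter->hw.idx of 0 can be used to mean "no PMC assigned yet".
 */
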
/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(unsigned int event[], int n_ev)
{
	u64 mask, value, nv;
	unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
	int i, j;
	u64 addf = ppmu->add_fields;
	u64 tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		alternatives[i][0] = event[i];
		if (ppmu->get_constraint(event[i], &amasks[i][0],
					 &avalues[i][0]))
			return -1;
		choice[i] = 0;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */
	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		n_alt[i] = ppmu->get_alternatives(event[i], alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(alternatives[i][j],
					     &amasks[i][j], &avalues[i][j]);
	}
	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}

		/*
		 * See if any alternative k for event i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | avalues[i][j]) +
				(value & avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ avalues[i][j])
			     & amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event i,
			 * remember where we got up to with this event,
			 * go on to the next event, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= amasks[i][j];
			++i;
			j = -1;
		}
	}
	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event[i] = alternatives[i][choice[i]];
	return 0;
}

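/*
 * To illustrate the arithmetic above with one (hypothetical) encoding:
 * suppose some PMU resource can be used by at most two events at once,
 * counted in bits 0-2 of the constraint value with an "overflow" bit at
 * bit 3.  Each event using the resource would report value = 0x1 and
 * mask = 0x8 from get_constraint(), and the PMU would set bit 0 in
 * add_fields and 0x5 (= 8 - 2 - 1) in test_adder.  Two such events
 * accumulate value = 2, and 2 + 0x5 = 0x7 stays below bit 3, so the
 * masked comparison passes; a third event gives 3 + 0x5 = 0x8, which
 * differs from the accumulated value in bit 3 and is rejected.
 */
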
static void power_perf_read(struct perf_counter *counter)
{
	long val, delta, prev;

	if (!counter->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = atomic64_read(&counter->hw.prev_count);
		barrier();
		val = read_pmc(counter->hw.idx);
	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &counter->hw.period_left);
}

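/*
 * Example of the 32-bit wraparound handling above: if prev_count was
 * 0xfffffff0 and the PMC has since wrapped around to 0x00000010, then
 * (val - prev) & 0xffffffff gives 0x20, i.e. 32 events, which is what
 * gets added to the 64-bit software count.
 */
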
/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
u64 hw_perf_save_disable(void)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long ret;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);

	ret = cpuhw->disabled;
	if (!ret) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			if (ppc_md.enable_pmcs)
				ppc_md.enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the counters
		 * before we return.
		 */
		mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
	return ret;
}

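/*
 * The value returned by hw_perf_save_disable() is the previous "disabled"
 * state and is meant to be handed back to hw_perf_restore(), so the usual
 * calling pattern (as in power_perf_enable() below) is:
 *
 *	pmudis = hw_perf_save_disable();
 *	... add or remove counters ...
 *	hw_perf_restore(pmudis);
 *
 * which leaves the PMU frozen if an outer caller had already frozen it.
 */
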
/*
 * Re-enable all counters if disable == 0.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_restore(u64 disable)
{
	struct perf_counter *counter;
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWCOUNTERS];

	if (disable)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed counters,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of counters).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
		if (cpuhw->n_counters == 0)
			get_lppaca()->pmcregs_in_use = 0;
		goto out;
	}
	/*
	 * Compute MMCR* values for the new set of counters
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware counters to their initial values.
	 * Then unfreeze the counters.
	 */
	get_lppaca()->pmcregs_in_use = 1;
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);
	/*
	 * Read off any pre-existing counters that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
			power_perf_read(counter);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
		}
	}
	/*
	 * Initialize the PMCs for all the new and moved counters.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx)
			continue;
		val = 0;
		if (counter->hw_event.irq_period) {
			left = atomic64_read(&counter->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&counter->hw.prev_count, val);
		counter->hw.idx = hwc_index[i] + 1;
		write_pmc(counter->hw.idx, val);
	}
	mb();
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
	mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);

 out:
	local_irq_restore(flags);
}

static int collect_events(struct perf_counter *group, int max_count,
			  struct perf_counter *ctrs[], unsigned int *events)
{
	int n = 0;
	struct perf_counter *counter;

	if (!is_software_counter(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(counter, &group->sibling_list, list_entry) {
		if (!is_software_counter(counter) &&
		    counter->state != PERF_COUNTER_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = counter;
			events[n++] = counter->hw.config;
		}
	}
	return n;
}

static void counter_sched_in(struct perf_counter *counter, int cpu)
{
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;
	if (is_software_counter(counter))
		counter->hw_ops->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	struct cpu_hw_counters *cpuhw;
	long i, n, n0;
	struct perf_counter *sub;

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	n = collect_events(group_leader, ppmu->n_counter - n0,
			   &cpuhw->counter[n0], &cpuhw->events[n0]);
	if (n < 0)
		return -EAGAIN;
	if (power_check_constraints(cpuhw->events, n + n0))
		return -EAGAIN;
	cpuhw->n_counters = n0 + n;
	cpuhw->n_added += n;

	/*
	 * OK, this group can go on; update counter states etc.,
	 * and enable any software counters
	 */
	for (i = n0; i < n0 + n; ++i)
		cpuhw->counter[i]->hw.config = cpuhw->events[i];
	cpuctx->active_oncpu += n;
	n = 1;
	counter_sched_in(group_leader, cpu);
	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
		if (sub->state != PERF_COUNTER_STATE_OFF) {
			counter_sched_in(sub, cpu);
			++n;
		}
	}
	ctx->nr_active += n;

	return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_restore to do the
 * actual work of reconfiguring the PMU.
 */
static int power_perf_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	u64 pmudis;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	pmudis = hw_perf_save_disable();

	/*
	 * Add the counter to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->counter[n0] = counter;
	cpuhw->events[n0] = counter->hw.config;
	if (power_check_constraints(cpuhw->events, n0 + 1))
		goto out;

	counter->hw.config = cpuhw->events[n0];
	counter->hw.idx = 0;
	cpuhw->n_counters = n0 + 1;
	++cpuhw->n_added;
	ret = 0;
 out:
	hw_perf_restore(pmudis);
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_perf_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	long i;
	u64 pmudis;
	unsigned long flags;

	local_irq_save(flags);
	pmudis = hw_perf_save_disable();

	power_perf_read(counter);

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	for (i = 0; i < cpuhw->n_counters; ++i) {
		if (counter == cpuhw->counter[i]) {
			while (++i < cpuhw->n_counters)
				cpuhw->counter[i-1] = cpuhw->counter[i];
			--cpuhw->n_counters;
			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
			break;
		}
	}
	if (cpuhw->n_counters == 0) {
		/* disable exceptions if no counters are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	hw_perf_restore(pmudis);
	local_irq_restore(flags);
}

struct hw_perf_counter_ops power_perf_ops = {
	.enable = power_perf_enable,
	.disable = power_perf_disable,
	.read = power_perf_read
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	unsigned long ev;
	struct perf_counter *ctrs[MAX_HWCOUNTERS];
	unsigned int events[MAX_HWCOUNTERS];
	int n;

	if (!ppmu)
		return NULL;
	if ((s64)counter->hw_event.irq_period < 0)
		return NULL;
	ev = counter->hw_event.type;
	if (!counter->hw_event.raw) {
		if (ev >= ppmu->n_generic ||
		    ppmu->generic_events[ev] == 0)
			return NULL;
		ev = ppmu->generic_events[ev];
	}
	counter->hw.config_base = ev;
	counter->hw.idx = 0;

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware counters in the group.  We assume the counter
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (counter->group_leader != counter) {
		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
				   ctrs, events);
		if (n < 0)
			return NULL;
	}
	events[n++] = ev;
	if (power_check_constraints(events, n))
		return NULL;

	counter->hw.config = events[n - 1];
	atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
	return &power_perf_ops;
}

/*
 * Process any pending counter wakeups on this cpu.
 */
void perf_counter_do_pending(void)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;

	set_perf_counter_pending(0);
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter && counter->wakeup_pending) {
			counter->wakeup_pending = 0;
			wake_up(&counter->waitq);
		}
	}
}

/*
 * Record data for an irq counter.
 * This function was lifted from the x86 code; maybe it should
 * go in the core?
 */
static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
	} else {
		u64 *p = (u64 *) &irqdata->data[irqdata->len];

		*p = data;
		irqdata->len += sizeof(u64);
	}
}

/*
 * Record all the values of the counters in a group
 */
static void perf_handle_group(struct perf_counter *counter)
{
	struct perf_counter *leader, *sub;

	leader = counter->group_leader;
	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
		if (sub != counter)
			sub->hw_ops->read(sub);
		perf_store_irq_data(counter, sub->hw_event.type);
		perf_store_irq_data(counter, atomic64_read(&sub->count));
	}
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, long val,
			       struct pt_regs *regs)
{
	s64 prev, delta, left;
	int record = 0;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&counter->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);

	/*
	 * See if the total period for this counter has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&counter->hw.period_left) - delta;
	if (counter->hw_event.irq_period) {
		if (left <= 0) {
			left += counter->hw_event.irq_period;
			if (left <= 0)
				left = counter->hw_event.irq_period;
			record = 1;
		}
		if (left < 0x80000000L)
			val = 0x80000000L - left;
	}
	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		switch (counter->hw_event.record_type) {
		case PERF_RECORD_SIMPLE:
			break;
		case PERF_RECORD_IRQ:
			perf_store_irq_data(counter, instruction_pointer(regs));
			counter->wakeup_pending = 1;
			break;
		case PERF_RECORD_GROUP:
			perf_handle_group(counter);
			counter->wakeup_pending = 1;
			break;
		}
	}
}

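/*
 * Setting the PMC to 0x80000000 - left (above and in hw_perf_restore)
 * relies on the PMU raising an exception when a counter's value goes
 * negative, i.e. when bit 31 becomes set: with irq_period = 100000, for
 * example, the PMC starts at 0x80000000 - 100000 and reaches 0x80000000
 * after exactly 100000 more events, producing the next interrupt.
 */
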
/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;
	long val;
	int need_wakeup = 0, found = 0;

	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		val = read_pmc(counter->hw.idx);
		if ((int)val < 0) {
			/* counter has overflowed */
			found = 1;
			record_and_restart(counter, val, regs);
			if (counter->wakeup_pending)
				need_wakeup = 1;
		}
	}

	/*
	 * In case we didn't find and reset the counter that caused
	 * the interrupt, scan all counters and reset any that are
	 * negative, to avoid getting continual interrupts.
	 * Any that we processed in the previous loop will not be negative.
	 */
	if (!found) {
		for (i = 0; i < ppmu->n_counter; ++i) {
			val = read_pmc(i + 1);
			if ((int)val < 0)
				write_pmc(i + 1, 0);
		}
	}

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the counters frozen until
	 * we get back out of this interrupt.
	 */
	mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);

	/*
	 * If we need a wakeup, check whether interrupts were soft-enabled
	 * when we took the interrupt.  If they were, we can wake stuff up
	 * immediately; otherwise we'll have to set a flag and do the
	 * wakeup when interrupts get soft-enabled.
	 */
	if (need_wakeup) {
		if (get_paca()->soft_enabled) {
			irq_enter();
			perf_counter_do_pending();
			irq_exit();
		} else {
			set_perf_counter_pending(1);
		}
	}
}

void hw_perf_counter_setup(int cpu)
{
	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

extern struct power_pmu ppc970_pmu;
extern struct power_pmu power6_pmu;

static int init_perf_counters(void)
{
	unsigned long pvr;

	if (reserve_pmc_hardware(perf_counter_interrupt)) {
		printk(KERN_ERR "Couldn't init performance monitor subsystem\n");
		return -EBUSY;
	}

	/* XXX should get this from cputable */
	pvr = mfspr(SPRN_PVR);
	switch (PVR_VER(pvr)) {
	case PV_970:
	case PV_970FX:
	case PV_970MP:
		ppmu = &ppc970_pmu;
		break;
	case 0x3e:	/* POWER6 */
		ppmu = &power6_pmu;
		break;
	}
	return 0;
}

arch_initcall(init_perf_counters);