/*
 * Performance counter x86 architecture code
 *
 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/intel_arch_perfmon.h>
#include <asm/apic.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_hw_counters __read_mostly;
static u32 perf_counter_mask __read_mostly;

/* No support for fixed function counters yet */

#define MAX_HW_COUNTERS 8

struct cpu_hw_counters {
	struct perf_counter	*counters[MAX_HW_COUNTERS];
	unsigned long		used[BITS_TO_LONGS(MAX_HW_COUNTERS)];
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

const int intel_perfmon_event_map[] =
{
	[PERF_COUNT_CYCLES]			= 0x003c,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
	[PERF_COUNT_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	WARN_ON_ONCE(counter->state != PERF_COUNTER_STATE_ACTIVE);
	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
	WARN_ON_ONCE((int)delta < 0);

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

/*
 * Setup the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;

	if (unlikely(!perf_counters_initialized))
		return -EINVAL;

	/*
	 * Count user events, and generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * If privileged enough, count OS events too, and allow
	 * NMI events as well:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN)) {
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
		if (hw_event->nmi)
			hwc->nmi = 1;
	}

	hwc->config_base	= MSR_ARCH_PERFMON_EVENTSEL0;
	hwc->counter_base	= MSR_ARCH_PERFMON_PERFCTR0;

	hwc->irq_period		= hw_event->irq_period;
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
		hwc->irq_period = 0x7FFFFFFF;

	atomic64_set(&hwc->period_left, hwc->irq_period);

	/*
	 * Raw event type provides the config in the event structure
	 */
	if (hw_event->raw) {
		hwc->config |= hw_event->type;
	} else {
		if (hw_event->type >= max_intel_perfmon_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= intel_perfmon_event_map[hw_event->type];
	}
	counter->wakeup_pending = 0;

	return 0;
}
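
/*
 * Enable all counters at once, by setting every implemented counter's
 * bit in the global control MSR:
 */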
void hw_perf_enable_all(void)
{
	if (unlikely(!perf_counters_initialized))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
}
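
/*
 * Read out and return the current global control state, then disable
 * all counters; the return value is meant to be passed back to
 * hw_perf_restore() later:
 */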
u64 hw_perf_save_disable(void)
{
	u64 ctrl;

	if (unlikely(!perf_counters_initialized))
		return 0;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);

	return ctrl;
}
EXPORT_SYMBOL_GPL(hw_perf_save_disable);
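
/*
 * Restore a previously saved global control state, as returned by
 * hw_perf_save_disable():
 */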
void hw_perf_restore(u64 ctrl)
{
	if (unlikely(!perf_counters_initialized))
		return;

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
}
EXPORT_SYMBOL_GPL(hw_perf_restore);
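
/*
 * Disable a single counter by re-writing its event-select MSR with the
 * enable bit cleared:
 */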
static void
__x86_perf_counter_disable(struct perf_counter *counter,
			   struct hw_perf_counter *hwc, unsigned int idx)
{
	int err;

	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
}

static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s32 left = atomic64_read(&hwc->period_left);
	s32 period = hwc->irq_period;

	WARN_ON_ONCE(period <= 0);

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}

	WARN_ON_ONCE(left <= 0);

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)(s64)-left);

	wrmsr(hwc->counter_base + idx, -left, 0);
}
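
/*
 * Enable a single counter by writing its event-select MSR with the
 * enable bit set:
 */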
static void
__x86_perf_counter_enable(struct perf_counter *counter,
			  struct hw_perf_counter *hwc, int idx)
{
	wrmsr(hwc->config_base + idx,
	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static void x86_perf_counter_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/* Try to get the previous counter again */
	if (test_and_set_bit(idx, cpuc->used)) {
		idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
		set_bit(idx, cpuc->used);
		hwc->idx = idx;
	}

	perf_counters_lapic_init(hwc->nmi);

	__x86_perf_counter_disable(counter, hwc, idx);

	cpuc->counters[idx] = counter;

	__hw_perf_counter_set_period(counter, hwc, idx);
	__x86_perf_counter_enable(counter, hwc, idx);
}
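
/*
 * Dump the global control/status/overflow MSRs and the per-counter
 * state of the current CPU, for debugging:
 */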
void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left;
	int cpu, idx;

	if (!nr_hw_counters)
		return;

	local_irq_disable();

	cpu = smp_processor_id();

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);

	printk(KERN_INFO "\n");
	printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl);
	printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status);
	printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow);

	for (idx = 0; idx < nr_hw_counters; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
		rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		printk(KERN_INFO "CPU#%d: PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		printk(KERN_INFO "CPU#%d: PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	local_irq_enable();
}
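
/*
 * Free up this counter's PMC slot and fold its remaining count into
 * the generic counter:
 */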
static void x86_perf_counter_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	unsigned int idx = hwc->idx;

	__x86_perf_counter_disable(counter, hwc, idx);

	clear_bit(idx, cpuc->used);
	cpuc->counters[idx] = NULL;

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
}
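
/*
 * Append one u64 worth of data to the counter's irqdata buffer:
 */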
static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
	} else {
		u64 *p = (u64 *) &irqdata->data[irqdata->len];

		*p = data;
		irqdata->len += sizeof(u64);
	}
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
	u64 pmc_ctrl;

	rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);

	x86_perf_counter_update(counter, hwc, idx);
	__hw_perf_counter_set_period(counter, hwc, idx);

	if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
		__x86_perf_counter_enable(counter, hwc, idx);
}
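
/*
 * A counter in a group overflowed: update each sibling of the group
 * and record its event type and current count:
 */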
static void
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
	struct perf_counter *counter, *group_leader = sibling->group_leader;

	/*
	 * Store sibling timestamps (if any):
	 */
	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
		x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
		perf_store_irq_data(sibling, counter->hw_event.type);
		perf_store_irq_data(sibling, atomic64_read(&counter->count));
	}
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
	u64 ack, status, saved_global;
	struct cpu_hw_counters *cpuc;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);

	/* Disable counters globally */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);

	cpuc = &per_cpu(cpu_hw_counters, cpu);

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (!status)
		goto out;

again:
	ack = status;
	for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!counter)
			continue;

		perf_save_and_restart(counter);

		switch (counter->hw_event.record_type) {
		case PERF_RECORD_SIMPLE:
			continue;
		case PERF_RECORD_IRQ:
			perf_store_irq_data(counter, instruction_pointer(regs));
			break;
		case PERF_RECORD_GROUP:
			perf_handle_group(counter, &status, &ack);
			break;
		}
		/*
		 * From NMI context we cannot call into the scheduler to
		 * do a task wakeup - but we mark these counters as
		 * wakeup_pending and initiate a wakeup callback:
		 */
		if (nmi) {
			counter->wakeup_pending = 1;
			set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
		} else {
			wake_up(&counter->waitq);
		}
	}

	wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);

	/*
	 * Repeat if there is more work to be done:
	 */
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	if (status)
		goto again;
out:
	/*
	 * Restore - do not reenable when global enable is off:
	 */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0);
}
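
/*
 * IRQ-context entry point: re-arm the APIC LVT entry and handle the
 * overflow in non-NMI mode:
 */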
void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	inc_irq_stat(apic_perf_irqs);
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	__smp_perf_counter_interrupt(regs, 0);

	irq_exit();
}

/*
 * This handler is triggered by NMI contexts:
 */
void perf_counter_notify(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int bit, cpu;

	local_irq_save(flags);
	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	for_each_bit(bit, cpuc->used, nr_hw_counters) {
		struct perf_counter *counter = cpuc->counters[bit];

		if (counter->wakeup_pending) {
			counter->wakeup_pending = 0;
			wake_up(&counter->waitq);
		}
	}

	local_irq_restore(flags);
}

void __cpuinit perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!perf_counters_initialized)
		return;
	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}
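
/*
 * Die-notifier callback for perf counter NMIs: re-arm the LVT entry
 * for NMI delivery and handle the overflow in NMI mode:
 */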
static int
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (likely(cmd != DIE_NMI_IPI))
		return NOTIFY_DONE;

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	__smp_perf_counter_interrupt(regs, 1);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler
};

void __init init_hw_perf_counters(void)
{
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return;

	printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

	printk(KERN_INFO "... version: %d\n", eax.split.version_id);
	printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);
	nr_hw_counters = eax.split.num_counters;
	if (nr_hw_counters > MAX_HW_COUNTERS) {
		nr_hw_counters = MAX_HW_COUNTERS;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
			nr_hw_counters, MAX_HW_COUNTERS);
	}
	perf_counter_mask = (1 << nr_hw_counters) - 1;
	perf_max_counters = nr_hw_counters;

	printk(KERN_INFO "... bit_width: %d\n", eax.split.bit_width);
	printk(KERN_INFO "... mask_length: %d\n", eax.split.mask_length);

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);

	perf_counters_initialized = true;
}
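
/*
 * Fold the current hardware count into the generic counter:
 */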
static void x86_perf_counter_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct hw_perf_counter_ops x86_perf_counter_ops = {
	.hw_perf_counter_enable		= x86_perf_counter_enable,
	.hw_perf_counter_disable	= x86_perf_counter_disable,
	.hw_perf_counter_read		= x86_perf_counter_read,
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return NULL;

	return &x86_perf_counter_ops;
}