/*
 * Performance counter x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 *  For licensing details see kernel-base/COPYING
 */
#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>

#include <asm/stacktrace.h>
static u64 perf_counter_mask __read_mostly;
/* The maximal number of PEBS counters: */
#define MAX_PEBS_COUNTERS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE		(BTS_RECORD_SIZE * 1024)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH		(BTS_RECORD_SIZE * 64)
/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)
/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
        u64	bts_buffer_base;
        u64	bts_index;
        u64	bts_absolute_maximum;
        u64	bts_interrupt_threshold;
        u64	pebs_buffer_base;
        u64	pebs_index;
        u64	pebs_absolute_maximum;
        u64	pebs_interrupt_threshold;
        u64	pebs_counter_reset[MAX_PEBS_COUNTERS];
};
struct cpu_hw_counters {
        struct perf_counter	*counters[X86_PMC_IDX_MAX];
        unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long		interrupts;
        int			enabled;
        struct debug_store	*ds;
};
/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        const char	*name;
        int		version;
        int		(*handle_irq)(struct pt_regs *);
        void		(*disable_all)(void);
        void		(*enable_all)(void);
        void		(*enable)(struct hw_perf_counter *, int);
        void		(*disable)(struct hw_perf_counter *, int);
        unsigned	eventsel;
        unsigned	perfctr;
        u64		(*event_map)(int);
        u64		(*raw_event)(u64);
        int		max_events;
        int		num_counters;
        int		num_counters_fixed;
        int		counter_bits;
        u64		counter_mask;
        u64		max_period;
        u64		intel_ctrl;
        void		(*enable_bts)(u64 config);
        void		(*disable_bts)(void);
};

static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
        .enabled = 1,
};
/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

static u64 p6_pmu_event_map(int event)
{
        return p6_perfmon_event_map[event];
}
/*
 * Counter setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_COUNTER			0x0000002EULL

static u64 p6_pmu_raw_event(u64 event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_COUNTER_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
        (P6_EVNTSEL_EVENT_MASK |	\
         P6_EVNTSEL_UNIT_MASK  |	\
         P6_EVNTSEL_EDGE_MASK  |	\
         P6_EVNTSEL_INV_MASK   |	\
         P6_EVNTSEL_COUNTER_MASK)

        return event & P6_EVNTSEL_MASK;
}
/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
        return intel_perfmon_event_map[event];
}
/*
 * Generalized hw caching related event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'event makes no sense on
 * this CPU', any other value means the raw event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
static const u64 nehalem_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
        },
 },
 [ C(L1I) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
                [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
                [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
                [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
                [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};
static const u64 core2_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(L1I) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
                [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
                [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};
static const u64 atom_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(L1I) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
                [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};
static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
        (CORE_EVNTSEL_EVENT_MASK |	\
         CORE_EVNTSEL_UNIT_MASK  |	\
         CORE_EVNTSEL_EDGE_MASK  |	\
         CORE_EVNTSEL_INV_MASK   |	\
         CORE_EVNTSEL_COUNTER_MASK)

        return event & CORE_EVNTSEL_MASK;
}
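
/*
 * Illustrative sketch, not part of the original file: what the mask above
 * lets through. A hypothetical raw config asking for event select 0x2e,
 * unit mask 0x41, with the enable bit (bit 22) also set:
 *
 *	intel_pmu_raw_event(0x0040412e) == 0x0000412e
 *
 * The event select and unit mask survive, while control bits that are not
 * part of CORE_EVNTSEL_MASK (such as EN, bit 22) are stripped, because the
 * kernel manages those itself.
 */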
static const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
                [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
                [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
        },
 },
 [ C(L1I) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
                [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
                [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback          */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
                [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss   */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
                [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
                [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};
/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
        return amd_perfmon_event_map[event];
}
static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
        (K7_EVNTSEL_EVENT_MASK |	\
         K7_EVNTSEL_UNIT_MASK  |	\
         K7_EVNTSEL_EDGE_MASK  |	\
         K7_EVNTSEL_INV_MASK   |	\
         K7_EVNTSEL_COUNTER_MASK)

        return event & K7_EVNTSEL_MASK;
}
/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_counter_update(struct perf_counter *counter,
                        struct hw_perf_counter *hwc, int idx)
{
        int shift = 64 - x86_pmu.counter_bits;
        u64 prev_raw_count, new_raw_count;
        s64 delta;

        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * Careful: an NMI might modify the previous counter value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic counter atomically:
         */
again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        rdmsrl(hwc->counter_base + idx, new_raw_count);

        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (counter-)time and add that to the generic counter.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &hwc->period_left);

        return new_raw_count;
}
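
/*
 * Illustrative sketch, not part of the original file: how the shift pair
 * above recovers a signed delta from a counter narrower than 64 bits.
 * Hypothetical helper, relying only on standard C semantics:
 */
static inline s64 example_counter_delta(u64 prev_raw, u64 new_raw,
                                        int counter_bits)
{
        int shift = 64 - counter_bits;	/* e.g. 64 - 32 = 32 on P6 */
        s64 delta;

        /* Left-align both raw values so the subtraction wraps correctly. */
        delta = (new_raw << shift) - (prev_raw << shift);

        /* The arithmetic right shift sign-extends back down. */
        return delta >> shift;
}

/*
 * E.g. with counter_bits = 32, a raw wrap from 0xffffffff to 0x00000003
 * gives example_counter_delta(0xffffffff, 0x00000003, 32) == 4 instead of
 * a huge bogus value.
 */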
static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);
static bool reserve_pmc_hardware(void)
{
        int i;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                disable_lapic_nmi_watchdog();

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
                        goto perfctr_fail;
        }

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
                        goto eventsel_fail;
        }

        return true;

eventsel_fail:
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu.eventsel + i);

        i = x86_pmu.num_counters;

perfctr_fail:
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu.perfctr + i);

        if (nmi_watchdog == NMI_LOCAL_APIC)
                enable_lapic_nmi_watchdog();

        return false;
}
static void release_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                release_perfctr_nmi(x86_pmu.perfctr + i);
                release_evntsel_nmi(x86_pmu.eventsel + i);
        }

        if (nmi_watchdog == NMI_LOCAL_APIC)
                enable_lapic_nmi_watchdog();
}
static inline bool bts_available(void)
{
        return x86_pmu.enable_bts != NULL;
}
static inline void init_debug_store_on_cpu(int cpu)
{
        struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;

        if (!ds)
                return;

        wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
                     (u32)((u64)(long)ds), (u32)((u64)(long)ds >> 32));
}
static inline void fini_debug_store_on_cpu(int cpu)
{
        if (!per_cpu(cpu_hw_counters, cpu).ds)
                return;

        wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
static void release_bts_hardware(void)
{
        int cpu;

        if (!bts_available())
                return;

        get_online_cpus();

        for_each_online_cpu(cpu)
                fini_debug_store_on_cpu(cpu);

        for_each_possible_cpu(cpu) {
                struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;

                if (!ds)
                        continue;

                per_cpu(cpu_hw_counters, cpu).ds = NULL;

                kfree((void *)(long)ds->bts_buffer_base);
                kfree(ds);
        }

        put_online_cpus();
}
static int reserve_bts_hardware(void)
{
        int cpu, err = 0;

        if (!bts_available())
                return 0;

        get_online_cpus();

        for_each_possible_cpu(cpu) {
                struct debug_store *ds;
                void *buffer;

                err = -ENOMEM;
                buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
                if (unlikely(!buffer))
                        break;

                ds = kzalloc(sizeof(*ds), GFP_KERNEL);
                if (unlikely(!ds)) {
                        kfree(buffer);
                        break;
                }

                ds->bts_buffer_base = (u64)(long)buffer;
                ds->bts_index = ds->bts_buffer_base;
                ds->bts_absolute_maximum =
                        ds->bts_buffer_base + BTS_BUFFER_SIZE;
                ds->bts_interrupt_threshold =
                        ds->bts_absolute_maximum - BTS_OVFL_TH;

                per_cpu(cpu_hw_counters, cpu).ds = ds;
                err = 0;
        }

        if (err)
                release_bts_hardware();
        else {
                for_each_online_cpu(cpu)
                        init_debug_store_on_cpu(cpu);
        }

        put_online_cpus();

        return err;
}
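
/*
 * Illustrative arithmetic, not part of the original file: with the
 * defaults above, each per-cpu BTS buffer works out to
 *
 *	BTS_BUFFER_SIZE = 24 * 1024 = 24576 bytes (1024 records)
 *	BTS_OVFL_TH     = 24 *   64 =  1536 bytes   (64 records)
 *
 * so bts_interrupt_threshold ends up 64 records below
 * bts_absolute_maximum, leaving the overflow interrupt some headroom
 * before the hardware would run off the end of the buffer.
 */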
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
        if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                release_bts_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}
static inline int x86_pmu_initialized(void)
{
        return x86_pmu.handle_irq != NULL;
}
static inline int
set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
{
        unsigned int cache_type, cache_op, cache_result;
        u64 config, val;

        config = attr->config;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        val = hw_cache_event_ids[cache_type][cache_op][cache_result];

        if (val == 0)
                return -ENOENT;

        if (val == -1)
                return -EINVAL;

        hwc->config |= val;

        return 0;
}
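
/*
 * Illustrative sketch, not part of the original file: building the packed
 * config that set_ext_hw_attr() decodes above. Hypothetical helper,
 * mirroring the (type | op << 8 | result << 16) layout:
 */
static inline u64 example_cache_config(unsigned int cache_type,
                                       unsigned int cache_op,
                                       unsigned int cache_result)
{
        /* e.g. C(L1D), C(OP_READ), C(RESULT_MISS) for L1-data read misses */
        return (u64)cache_type | ((u64)cache_op << 8) |
               ((u64)cache_result << 16);
}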
static void intel_pmu_enable_bts(u64 config)
{
        unsigned long debugctlmsr;

        debugctlmsr = get_debugctlmsr();

        debugctlmsr |= X86_DEBUGCTL_TR;
        debugctlmsr |= X86_DEBUGCTL_BTS;
        debugctlmsr |= X86_DEBUGCTL_BTINT;

        if (!(config & ARCH_PERFMON_EVENTSEL_OS))
                debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

        if (!(config & ARCH_PERFMON_EVENTSEL_USR))
                debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

        update_debugctlmsr(debugctlmsr);
}
static void intel_pmu_disable_bts(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        unsigned long debugctlmsr;

        if (!cpuc->ds)
                return;

        debugctlmsr = get_debugctlmsr();

        debugctlmsr &=
                ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
                  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

        update_debugctlmsr(debugctlmsr);
}
/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
        struct perf_counter_attr *attr = &counter->attr;
        struct hw_perf_counter *hwc = &counter->hw;
        u64 config;
        int err;

        if (!x86_pmu_initialized())
                return -ENODEV;

        err = 0;
        if (!atomic_inc_not_zero(&active_counters)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&active_counters) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
                        else
                                reserve_bts_hardware();
                }
                if (!err)
                        atomic_inc(&active_counters);
                mutex_unlock(&pmc_reserve_mutex);
        }
        if (err)
                return err;

        /*
         * Generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_INT;

        /*
         * Count user and OS events unless requested not to.
         */
        if (!attr->exclude_user)
                hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!attr->exclude_kernel)
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

        if (!hwc->sample_period) {
                hwc->sample_period = x86_pmu.max_period;
                hwc->last_period = hwc->sample_period;
                atomic64_set(&hwc->period_left, hwc->sample_period);
        }

        counter->destroy = hw_perf_counter_destroy;

        /*
         * Raw event type provides the config in the event structure
         */
        if (attr->type == PERF_TYPE_RAW) {
                hwc->config |= x86_pmu.raw_event(attr->config);
                return 0;
        }

        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, attr);

        if (attr->config >= x86_pmu.max_events)
                return -EINVAL;

        /*
         * The generic map:
         */
        config = x86_pmu.event_map(attr->config);

        if (config == 0)
                return -ENOENT;

        if (config == -1LL)
                return -EINVAL;

        hwc->config |= config;

        return 0;
}
static void p6_pmu_disable_all(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        u64 val;

        if (!cpuc->enabled)
                return;

        cpuc->enabled = 0;
        barrier();

        /* p6 only has one enable register */
        rdmsrl(MSR_P6_EVNTSEL0, val);
        val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
        wrmsrl(MSR_P6_EVNTSEL0, val);
}
static void intel_pmu_disable_all(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

        if (!cpuc->enabled)
                return;

        cpuc->enabled = 0;
        barrier();

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();
}
static void amd_pmu_disable_all(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        int idx;

        if (!cpuc->enabled)
                return;

        cpuc->enabled = 0;
        /*
         * ensure we write the disable before we start disabling the
         * counters proper, so that amd_pmu_enable_counter() does the
         * right thing.
         */
        barrier();

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
                if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
                wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
        }
}
void hw_perf_disable(void)
{
        if (!x86_pmu_initialized())
                return;
        return x86_pmu.disable_all();
}
static void p6_pmu_enable_all(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        unsigned long val;

        if (cpuc->enabled)
                return;

        cpuc->enabled = 1;
        barrier();

        /* p6 only has one enable register */
        rdmsrl(MSR_P6_EVNTSEL0, val);
        val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
        wrmsrl(MSR_P6_EVNTSEL0, val);
}
static void intel_pmu_enable_all(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

        if (cpuc->enabled)
                return;

        cpuc->enabled = 1;
        barrier();

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
                struct perf_counter *counter =
                        cpuc->counters[X86_PMC_IDX_FIXED_BTS];

                if (WARN_ON_ONCE(!counter))
                        return;

                intel_pmu_enable_bts(counter->hw.config);
        }
}
static void amd_pmu_enable_all(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        int idx;

        if (cpuc->enabled)
                return;

        cpuc->enabled = 1;
        barrier();

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                struct perf_counter *counter = cpuc->counters[idx];
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                val = counter->hw.config;
                val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
                wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
        }
}
void hw_perf_enable(void)
{
        if (!x86_pmu_initialized())
                return;
        x86_pmu.enable_all();
}
static inline u64 intel_pmu_get_status(void)
{
        u64 status;

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

        return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
        (void)checking_wrmsrl(hwc->config_base + idx,
                              hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
        (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
}
static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;

        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        (void)checking_wrmsrl(hwc->config_base, ctrl_val);
}
static inline void
p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        u64 val = P6_NOP_COUNTER;

        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

        (void)checking_wrmsrl(hwc->config_base + idx, val);
}
static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
        if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
                intel_pmu_disable_bts();
                return;
        }

        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_disable_fixed(hwc, idx);
                return;
        }

        x86_pmu_disable_counter(hwc, idx);
}
static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
        x86_pmu_disable_counter(hwc, idx);
}
static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static int
x86_perf_counter_set_period(struct perf_counter *counter,
                             struct hw_perf_counter *hwc, int idx)
{
        s64 left = atomic64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int err, ret = 0;

        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        /*
         * Quirk: certain CPUs don't like it if just 1 event is left:
         */
        if (unlikely(left < 2))
                left = 2;

        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;

        per_cpu(prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw counter starts counting from this counter offset,
         * mark it to be able to extract future deltas:
         */
        atomic64_set(&hwc->prev_count, (u64)-left);

        err = checking_wrmsrl(hwc->counter_base + idx,
                             (u64)(-left) & x86_pmu.counter_mask);

        perf_counter_update_userpage(counter);

        return ret;
}
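
/*
 * Illustrative arithmetic, not part of the original file: with a 32-bit
 * wide counter (counter_mask == 0xffffffff) and left == 1000, the write
 * above programs
 *
 *	(u64)(-1000) & 0xffffffff == 0xfffffc18
 *
 * so the counter overflows, and raises its interrupt, after exactly 1000
 * more events, when it wraps past the top of its range.
 */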
static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
        int err;

        /*
         * Enable IRQ generation (0x8),
         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
         * if requested:
         */
        bits = 0x8ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
                bits |= 0x2;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;

        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        u64 val;

        val = hwc->config;
        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

        (void)checking_wrmsrl(hwc->config_base + idx, val);
}
static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
        if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
                if (!__get_cpu_var(cpu_hw_counters).enabled)
                        return;

                intel_pmu_enable_bts(hwc->config);
                return;
        }

        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_enable_fixed(hwc, idx);
                return;
        }

        x86_pmu_enable_counter(hwc, idx);
}
static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

        if (cpuc->enabled)
                x86_pmu_enable_counter(hwc, idx);
}
static inline int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
        unsigned int event;

        event = hwc->config & ARCH_PERFMON_EVENT_MASK;

        if (unlikely((event ==
                      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
                     (hwc->sample_period == 1)))
                return X86_PMC_IDX_FIXED_BTS;

        if (!x86_pmu.num_counters_fixed)
                return -1;

        if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
                return X86_PMC_IDX_FIXED_INSTRUCTIONS;
        if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
                return X86_PMC_IDX_FIXED_CPU_CYCLES;
        if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
                return X86_PMC_IDX_FIXED_BUS_CYCLES;

        return -1;
}
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx;

        idx = fixed_mode_idx(counter, hwc);
        if (idx == X86_PMC_IDX_FIXED_BTS) {
                /*
                 * Try to use BTS for branch tracing. If that is not
                 * available, try to get a generic counter.
                 */
                if (unlikely(!cpuc->ds))
                        goto try_generic;

                /*
                 * Try to get the fixed counter, if that is already taken
                 * then try to get a generic counter:
                 */
                if (test_and_set_bit(idx, cpuc->used_mask))
                        goto try_generic;

                hwc->config_base	= 0;
                hwc->counter_base	= 0;
                hwc->idx		= idx;
        } else if (idx >= 0) {
                /*
                 * Try to get the fixed counter, if that is already taken
                 * then try to get a generic counter:
                 */
                if (test_and_set_bit(idx, cpuc->used_mask))
                        goto try_generic;

                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                /*
                 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
                 */
                hwc->counter_base =
                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
                hwc->idx = idx;
        } else {
                idx = hwc->idx;
                /* Try to get the previous generic counter again */
                if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
                        idx = find_first_zero_bit(cpuc->used_mask,
                                                  x86_pmu.num_counters);
                        if (idx == x86_pmu.num_counters)
                                return -EAGAIN;

                        set_bit(idx, cpuc->used_mask);
                        hwc->idx = idx;
                }
                hwc->config_base  = x86_pmu.eventsel;
                hwc->counter_base = x86_pmu.perfctr;
        }

        perf_counters_lapic_init();

        x86_pmu.disable(hwc, idx);

        cpuc->counters[idx] = counter;
        set_bit(idx, cpuc->active_mask);

        x86_perf_counter_set_period(counter, hwc, idx);
        x86_pmu.enable(hwc, idx);

        perf_counter_update_userpage(counter);

        return 0;
}
static void x86_pmu_unthrottle(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;

        if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
                                cpuc->counters[hwc->idx] != counter))
                return;

        x86_pmu.enable(hwc, hwc->idx);
}
void perf_counter_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        struct cpu_hw_counters *cpuc;
        unsigned long flags;
        int cpu, idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        if (x86_pmu.version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

                pr_info("\n");
                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
        }
        pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used_mask);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
                rdmsrl(x86_pmu.perfctr  + idx, pmc_count);

                prev_left = per_cpu(prev_left[idx], cpu);

                pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_restore(flags);
}
static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc,
                                       struct perf_sample_data *data)
{
        struct debug_store *ds = cpuc->ds;
        struct bts_record {
                u64	from;
                u64	to;
                u64	flags;
        };
        struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS];
        unsigned long orig_ip = data->regs->ip;
        u64 at;

        if (!counter)
                return;

        if (!ds)
                return;

        for (at = ds->bts_buffer_base;
             at < ds->bts_index;
             at += sizeof(struct bts_record)) {
                struct bts_record *rec = (struct bts_record *)(long)at;

                data->regs->ip	= rec->from;
                data->addr	= rec->to;

                perf_counter_output(counter, 1, data);
        }

        ds->bts_index = ds->bts_buffer_base;

        data->regs->ip	= orig_ip;
        data->addr	= 0;

        /* There's new data available. */
        counter->pending_kill = POLL_IN;
}
static void x86_pmu_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        /*
         * Must be done before we disable, otherwise the nmi handler
         * could reenable again:
         */
        clear_bit(idx, cpuc->active_mask);
        x86_pmu.disable(hwc, idx);

        /*
         * Make sure the cleared pointer becomes visible before we
         * (potentially) free the counter:
         */
        barrier();

        /*
         * Drain the remaining delta count out of a counter
         * that we are disabling:
         */
        x86_perf_counter_update(counter, hwc, idx);

        /* Drain the remaining BTS records. */
        if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
                struct perf_sample_data data;
                struct pt_regs regs;

                data.regs = &regs;
                intel_pmu_drain_bts_buffer(cpuc, &data);
        }
        cpuc->counters[idx] = NULL;
        clear_bit(idx, cpuc->used_mask);

        perf_counter_update_userpage(counter);
}
/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static int intel_pmu_save_and_restart(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;
        int ret;

        x86_perf_counter_update(counter, hwc, idx);
        ret = x86_perf_counter_set_period(counter, hwc, idx);

        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                intel_pmu_enable_counter(hwc, idx);

        return ret;
}
static void intel_pmu_reset(void)
{
        struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds;
        unsigned long flags;
        int idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        printk("clearing PMU state on CPU#%d\n", smp_processor_id());

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
                checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
        }
        if (ds)
                ds->bts_index = ds->bts_buffer_base;

        local_irq_restore(flags);
}
static int p6_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_counters *cpuc;
        struct perf_counter *counter;
        struct hw_perf_counter *hwc;
        int idx, handled = 0;
        u64 val;

        data.regs = regs;
        data.addr = 0;

        cpuc = &__get_cpu_var(cpu_hw_counters);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                counter = cpuc->counters[idx];
                hwc = &counter->hw;

                val = x86_perf_counter_update(counter, hwc, idx);
                if (val & (1ULL << (x86_pmu.counter_bits - 1)))
                        continue;

                /*
                 * counter overflow
                 */
                handled		= 1;
                data.period	= counter->hw.last_period;

                if (!x86_perf_counter_set_period(counter, hwc, idx))
                        continue;

                if (perf_counter_overflow(counter, 1, &data))
                        p6_pmu_disable_counter(hwc, idx);
        }

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        return handled;
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_counters *cpuc;
        int bit, loops;
        u64 ack, status;

        data.regs = regs;
        data.addr = 0;

        cpuc = &__get_cpu_var(cpu_hw_counters);

        perf_disable();
        intel_pmu_drain_bts_buffer(cpuc, &data);
        status = intel_pmu_get_status();
        if (!status) {
                perf_enable();
                return 0;
        }

        loops = 0;
again:
        if (++loops > 100) {
                WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
                perf_counter_print_debug();
                intel_pmu_reset();
                perf_enable();
                return 1;
        }

        inc_irq_stat(apic_perf_irqs);
        ack = status;
        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                clear_bit(bit, (unsigned long *) &status);
                if (!test_bit(bit, cpuc->active_mask))
                        continue;

                if (!intel_pmu_save_and_restart(counter))
                        continue;

                data.period = counter->hw.last_period;

                if (perf_counter_overflow(counter, 1, &data))
                        intel_pmu_disable_counter(&counter->hw, bit);
        }

        intel_pmu_ack_status(ack);

        /*
         * Repeat if there is more work to be done:
         */
        status = intel_pmu_get_status();
        if (status)
                goto again;

        perf_enable();

        return 1;
}
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_counters *cpuc;
        struct perf_counter *counter;
        struct hw_perf_counter *hwc;
        int idx, handled = 0;
        u64 val;

        data.regs = regs;
        data.addr = 0;

        cpuc = &__get_cpu_var(cpu_hw_counters);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                counter = cpuc->counters[idx];
                hwc = &counter->hw;

                val = x86_perf_counter_update(counter, hwc, idx);
                if (val & (1ULL << (x86_pmu.counter_bits - 1)))
                        continue;

                /*
                 * counter overflow
                 */
                handled		= 1;
                data.period	= counter->hw.last_period;

                if (!x86_perf_counter_set_period(counter, hwc, idx))
                        continue;

                if (perf_counter_overflow(counter, 1, &data))
                        amd_pmu_disable_counter(hwc, idx);
        }

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        return handled;
}
void smp_perf_pending_interrupt(struct pt_regs *regs)
{
        irq_enter();
        ack_APIC_irq();
        inc_irq_stat(apic_pending_irqs);
        perf_counter_do_pending();
        irq_exit();
}
void set_perf_counter_pending(void)
{
        apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}
void perf_counters_lapic_init(void)
{
        if (!x86_pmu_initialized())
                return;

        /*
         * Always use NMI for PMU
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;

        if (!atomic_read(&active_counters))
                return NOTIFY_DONE;

        switch (cmd) {
        case DIE_NMI:
        case DIE_NMI_IPI:
                break;

        default:
                return NOTIFY_DONE;
        }

        regs = args->regs;

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        /*
         * Can't rely on the handled return value to say it was our NMI, two
         * counters could trigger 'simultaneously' raising two back-to-back NMIs.
         *
         * If the first NMI handles both, the latter will be empty and daze
         * the CPU.
         */
        x86_pmu.handle_irq(regs);

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
        .notifier_call		= perf_counter_nmi_handler,
        .next			= NULL,
        .priority		= 1
};
static struct x86_pmu p6_pmu = {
        .name			= "p6",
        .handle_irq		= p6_pmu_handle_irq,
        .disable_all		= p6_pmu_disable_all,
        .enable_all		= p6_pmu_enable_all,
        .enable			= p6_pmu_enable_counter,
        .disable		= p6_pmu_disable_counter,
        .eventsel		= MSR_P6_EVNTSEL0,
        .perfctr		= MSR_P6_PERFCTR0,
        .event_map		= p6_pmu_event_map,
        .raw_event		= p6_pmu_raw_event,
        .max_events		= ARRAY_SIZE(p6_perfmon_event_map),
        .max_period		= (1ULL << 31) - 1,
        .version		= 0,
        .num_counters		= 2,
        /*
         * Counters have 40 bits implemented. However they are designed such
         * that bits [32-39] are sign extensions of bit 31. As such the
         * effective width of a counter for P6-like PMU is 32 bits only.
         *
         * See IA-32 Intel Architecture Software developer manual Vol 3B
         */
        .counter_bits		= 32,
        .counter_mask		= (1ULL << 32) - 1,
};
static struct x86_pmu intel_pmu = {
        .name			= "Intel",
        .handle_irq		= intel_pmu_handle_irq,
        .disable_all		= intel_pmu_disable_all,
        .enable_all		= intel_pmu_enable_all,
        .enable			= intel_pmu_enable_counter,
        .disable		= intel_pmu_disable_counter,
        .eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
        .event_map		= intel_pmu_event_map,
        .raw_event		= intel_pmu_raw_event,
        .max_events		= ARRAY_SIZE(intel_perfmon_event_map),
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic counter period:
         */
        .max_period		= (1ULL << 31) - 1,
        .enable_bts		= intel_pmu_enable_bts,
        .disable_bts		= intel_pmu_disable_bts,
};
static struct x86_pmu amd_pmu = {
        .name			= "AMD",
        .handle_irq		= amd_pmu_handle_irq,
        .disable_all		= amd_pmu_disable_all,
        .enable_all		= amd_pmu_enable_all,
        .enable			= amd_pmu_enable_counter,
        .disable		= amd_pmu_disable_counter,
        .eventsel		= MSR_K7_EVNTSEL0,
        .perfctr		= MSR_K7_PERFCTR0,
        .event_map		= amd_pmu_event_map,
        .raw_event		= amd_pmu_raw_event,
        .max_events		= ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters		= 4,
        .counter_bits		= 48,
        .counter_mask		= (1ULL << 48) - 1,
        /* use highest bit to detect overflow */
        .max_period		= (1ULL << 47) - 1,
};
static int p6_pmu_init(void)
{
        switch (boot_cpu_data.x86_model) {
        case 1:
        case 3:  /* Pentium Pro */
        case 5:
        case 6:  /* Pentium II */
        case 7:
        case 8:
        case 11: /* Pentium III */
                break;
        case 9:
        case 13:
                /* Pentium M */
                break;
        default:
                pr_cont("unsupported p6 CPU model %d ",
                        boot_cpu_data.x86_model);
                return -ENODEV;
        }

        if (!cpu_has_apic) {
                pr_info("no Local APIC, try rebooting with lapic");
                return -ENODEV;
        }

        x86_pmu = p6_pmu;

        return 0;
}
static int intel_pmu_init(void)
{
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int ebx;
        int version;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                /* check for P6 processor family */
                if (boot_cpu_data.x86 == 6) {
                        return p6_pmu_init();
                } else {
                        return -ENODEV;
                }
        }

        /*
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired Event or not.
         */
        cpuid(10, &eax.full, &ebx, &unused, &edx.full);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return -ENODEV;

        version = eax.split.version_id;
        if (version < 2)
                return -ENODEV;

        x86_pmu				= intel_pmu;
        x86_pmu.version			= version;
        x86_pmu.num_counters		= eax.split.num_counters;
        x86_pmu.counter_bits		= eax.split.bit_width;
        x86_pmu.counter_mask		= (1ULL << eax.split.bit_width) - 1;

        /*
         * Quirk: v2 perfmon does not report fixed-purpose counters, so
         * assume at least 3 counters:
         */
        x86_pmu.num_counters_fixed	= max((int)edx.split.num_counters_fixed, 3);

        /*
         * Install the hw-cache-events table:
         */
        switch (boot_cpu_data.x86_model) {
        case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
        case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
        case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
        case 29: /* six-core 45 nm xeon "Dunnington" */
                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                pr_cont("Core2 events, ");
                break;
        case 26:
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                pr_cont("Nehalem/Corei7 events, ");
                break;
        case 28:
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                pr_cont("Atom events, ");
                break;
        }
        return 0;
}
static int amd_pmu_init(void)
{
        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
                return -ENODEV;

        x86_pmu = amd_pmu;

        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        return 0;
}
void __init init_hw_perf_counters(void)
{
        int err;

        pr_info("Performance Counters: ");

        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                err = intel_pmu_init();
                break;
        case X86_VENDOR_AMD:
                err = amd_pmu_init();
                break;
        default:
                return;
        }
        if (err != 0) {
                pr_cont("no PMU driver, software counters only.\n");
                return;
        }

        pr_cont("%s PMU driver.\n", x86_pmu.name);

        if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
                     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
                x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
        }
        perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
        perf_max_counters = x86_pmu.num_counters;

        if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
                WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
                     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
                x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
        }

        perf_counter_mask |=
                ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
        x86_pmu.intel_ctrl = perf_counter_mask;

        perf_counters_lapic_init();
        register_die_notifier(&perf_counter_nmi_notifier);

        pr_info("... version:                 %d\n",     x86_pmu.version);
        pr_info("... bit width:               %d\n",     x86_pmu.counter_bits);
        pr_info("... generic counters:        %d\n",     x86_pmu.num_counters);
        pr_info("... value mask:              %016Lx\n", x86_pmu.counter_mask);
        pr_info("... max period:              %016Lx\n", x86_pmu.max_period);
        pr_info("... fixed-purpose counters:  %d\n",     x86_pmu.num_counters_fixed);
        pr_info("... counter mask:            %016Lx\n", perf_counter_mask);
}
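
/*
 * Illustrative arithmetic, not part of the original file: the resulting
 * perf_counter_mask layout on a CPU with 4 generic and 3 fixed counters
 * (with X86_PMC_IDX_FIXED == 32):
 *
 *	generic: bits  0..3  -> 0x000000000000000f
 *	fixed:   bits 32..34 -> 0x0000000700000000
 *	perf_counter_mask     = 0x000000070000000f
 */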
static inline void x86_pmu_read(struct perf_counter *counter)
{
        x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}
static const struct pmu pmu = {
        .enable		= x86_pmu_enable,
        .disable	= x86_pmu_disable,
        .read		= x86_pmu_read,
        .unthrottle	= x86_pmu_unthrottle,
};
const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
        int err;

        err = __hw_perf_counter_init(counter);
        if (err)
                return ERR_PTR(err);

        return &pmu;
}
/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}
static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
static DEFINE_PER_CPU(int, in_nmi_frame);


static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        /* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
        /* Ignore warnings */
}
static int backtrace_stack(void *data, char *name)
{
        per_cpu(in_nmi_frame, smp_processor_id()) =
                        x86_is_stack_id(NMI_STACK, name);

        return 0;
}
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
        struct perf_callchain_entry *entry = data;

        if (per_cpu(in_nmi_frame, smp_processor_id()))
                return;

        if (reliable)
                callchain_store(entry, addr);
}
static const struct stacktrace_ops backtrace_ops = {
        .warning		= backtrace_warning,
        .warning_symbol		= backtrace_warning_symbol,
        .stack			= backtrace_stack,
        .address		= backtrace_address,
};

#include "../dumpstack.h"
static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
        callchain_store(entry, PERF_CONTEXT_KERNEL);
        callchain_store(entry, regs->ip);

        dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
}
/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
        unsigned long offset, addr = (unsigned long)from;
        int type = in_nmi() ? KM_NMI : KM_IRQ0;
        unsigned long size, len = 0;
        struct page *page;
        void *map;
        int ret;

        do {
                ret = __get_user_pages_fast(addr, 1, 0, &page);
                if (!ret)
                        break;

                offset = addr & (PAGE_SIZE - 1);
                size = min(PAGE_SIZE - offset, n - len);

                map = kmap_atomic(page, type);
                memcpy(to, map+offset, size);
                kunmap_atomic(map, type);
                put_page(page);

                len  += size;
                to   += size;
                addr += size;

        } while (len < n);

        return len;
}
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
        unsigned long bytes;

        bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

        return bytes == sizeof(*frame);
}
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
        struct stack_frame frame;
        const void __user *fp;

        if (!user_mode(regs))
                regs = task_pt_regs(current);

        fp = (void __user *)regs->bp;

        callchain_store(entry, PERF_CONTEXT_USER);
        callchain_store(entry, regs->ip);

        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                frame.next_frame	= NULL;
                frame.return_address	= 0;

                if (!copy_stack_frame(fp, &frame))
                        break;

                if ((unsigned long)fp < regs->sp)
                        break;

                callchain_store(entry, frame.return_address);
                fp = frame.next_frame;
        }
}
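
/*
 * Illustrative sketch, not part of the original file: the frame-pointer
 * walk above assumes the conventional x86 frame layout described by
 * struct stack_frame in "../dumpstack.h", roughly:
 *
 *	struct stack_frame {
 *		struct stack_frame	*next_frame;	(saved frame pointer)
 *		unsigned long		return_address;	(address after the call)
 *	};
 *
 * Each iteration copies one such pair from user space and follows
 * next_frame, stopping at the first frame that fails to copy or that
 * points below the current stack pointer.
 */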
static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
        int is_user;

        if (!regs)
                return;

        is_user = user_mode(regs);

        if (!current || current->pid == 0)
                return;

        if (is_user && current->state != TASK_RUNNING)
                return;

        if (!is_user)
                perf_callchain_kernel(regs, entry);

        if (current->mm)
                perf_callchain_user(regs, entry);
}
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
        struct perf_callchain_entry *entry;

        if (in_nmi())
                entry = &__get_cpu_var(nmi_entry);
        else
                entry = &__get_cpu_var(irq_entry);

        entry->nr = 0;

        perf_do_callchain(regs, entry);

        return entry;
}
void hw_perf_counter_setup_online(int cpu)
{
        init_debug_store_on_cpu(cpu);
}