/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
static u64 perf_event_mask __read_mostly;
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE	24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE	(BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH	(BTS_RECORD_SIZE * 128)
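/*
 * For illustration: a BTS record is three u64 fields (from, to,
 * flags), hence the 24-byte record size. The per-cpu buffer above
 * therefore holds 2048 records (49152 bytes), and the interrupt
 * threshold sits 128 records (3072 bytes) before the end of the
 * buffer.
 */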
/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)
/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};
#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
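/*
 * Example: assuming the usual X86_PMC_IDX_MAX of 64,
 * BITS_TO_U64(64) = DIV_ROUND_UP(64, 64) = 1, i.e. a single u64
 * suffices for one bit per possible counter index.
 */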
struct event_constraint {
	u64	idxmsk[BITS_TO_U64(X86_PMC_IDX_MAX)];
	int	code;
	int	cmask;
};
struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
	struct debug_store	*ds;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
};
#define EVENT_CONSTRAINT(c, n, m) {	\
	.code = (c),			\
	.cmask = (m),			\
	.idxmsk[0] = (n) }

#define EVENT_CONSTRAINT_END \
	{ .code = 0, .cmask = 0, .idxmsk[0] = 0 }

#define for_each_event_constraint(e, c) \
	for ((e) = (c); (e)->cmask; (e)++)
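/*
 * Reading a constraint entry, by example:
 *
 *	EVENT_CONSTRAINT(0xc1, 0x1, INTEL_ARCH_EVENT_MASK)
 *
 * matches any event whose (config & INTEL_ARCH_EVENT_MASK) == 0xc1
 * and restricts it to the counters in bitmask 0x1, i.e. generic
 * counter 0 only. A zero cmask terminates a constraint table, which
 * is what for_each_event_constraint() tests for.
 */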
/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_event *, int);
	void		(*disable)(struct hw_perf_event *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_events;
	int		num_events_fixed;
	int		event_bits;
	u64		event_mask;
	int		apic;
	u64		max_period;
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);
	void		(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event,
						 u64 *idxmsk);
	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	const struct event_constraint *event_constraints;
};
static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};
static int x86_perf_event_set_period(struct perf_event *event,
			     struct hw_perf_event *hwc, int idx);
/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}
/*
 * Event setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_EVENT			0x0000002EULL
static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}
static struct event_constraint intel_p6_event_constraints[] =
{
	EVENT_CONSTRAINT(0xc1, 0x1, INTEL_ARCH_EVENT_MASK),	/* FLOPS */
	EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK),	/* FP_COMP_OPS_EXE */
	EVENT_CONSTRAINT(0x11, 0x1, INTEL_ARCH_EVENT_MASK),	/* FP_ASSIST */
	EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK),	/* MUL */
	EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK),	/* DIV */
	EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};
/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};
static struct event_constraint intel_core_event_constraints[] =
{
	EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
	EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
	EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK),	/* FP_COMP_OPS_EXE */
	EVENT_CONSTRAINT(0x11, 0x2, INTEL_ARCH_EVENT_MASK),	/* FP_ASSIST */
	EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK),	/* MUL */
	EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK),	/* DIV */
	EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT(0x18, 0x1, INTEL_ARCH_EVENT_MASK),	/* IDLE_DURING_DIV */
	EVENT_CONSTRAINT(0x19, 0x2, INTEL_ARCH_EVENT_MASK),	/* DELAYED_BYPASS */
	EVENT_CONSTRAINT(0xa1, 0x1, INTEL_ARCH_EVENT_MASK),	/* RS_UOPS_DISPATCH_CYCLES */
	EVENT_CONSTRAINT(0xcb, 0x1, INTEL_ARCH_EVENT_MASK),	/* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] =
{
	EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
	EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
	EVENT_CONSTRAINT(0x40, 0x3, INTEL_ARCH_EVENT_MASK),	/* L1D_CACHE_LD */
	EVENT_CONSTRAINT(0x41, 0x3, INTEL_ARCH_EVENT_MASK),	/* L1D_CACHE_ST */
	EVENT_CONSTRAINT(0x42, 0x3, INTEL_ARCH_EVENT_MASK),	/* L1D_CACHE_LOCK */
	EVENT_CONSTRAINT(0x43, 0x3, INTEL_ARCH_EVENT_MASK),	/* L1D_ALL_REF */
	EVENT_CONSTRAINT(0x4e, 0x3, INTEL_ARCH_EVENT_MASK),	/* L1D_PREFETCH */
	EVENT_CONSTRAINT(0x4c, 0x3, INTEL_ARCH_EVENT_MASK),	/* LOAD_HIT_PRE */
	EVENT_CONSTRAINT(0x51, 0x3, INTEL_ARCH_EVENT_MASK),	/* L1D */
	EVENT_CONSTRAINT(0x52, 0x3, INTEL_ARCH_EVENT_MASK),	/* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
	EVENT_CONSTRAINT(0x53, 0x3, INTEL_ARCH_EVENT_MASK),	/* L1D_CACHE_LOCK_FB_HIT */
	EVENT_CONSTRAINT(0xc5, 0x3, INTEL_ARCH_EVENT_MASK),	/* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_gen_event_constraints[] =
{
	EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
	EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
	EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
static __initconst u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(INTEL_ARCH_EVTSEL_MASK |	\
	 INTEL_ARCH_UNIT_MASK   |	\
	 INTEL_ARCH_EDGE_MASK   |	\
	 INTEL_ARCH_INV_MASK    |	\
	 INTEL_ARCH_CNT_MASK)

	return hw_event & CORE_EVNTSEL_MASK;
}
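/*
 * For illustration: the masking above keeps only the architectural
 * evtsel fields of a raw config - event select (bits 0-7), unit mask
 * (bits 8-15), edge detect (bit 18), invert (bit 23) and counter
 * mask (bits 24-31) - so user-supplied raw events cannot smuggle in
 * bits such as enable, INT or the OS/USR filters, which the kernel
 * manages itself.
 */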
static __initconst u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}
static u64 amd_pmu_raw_event(u64 hw_event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_REG_MASK)

	return hw_event & K7_EVNTSEL_MASK;
}
/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event,
			struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
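/*
 * Worked example of the shift trick above, assuming 48-bit counters
 * (shift = 16): if prev_raw_count was 0xffffffffffff (counter about
 * to wrap) and new_raw_count reads back as 0x5 after the wrap, then
 *
 *	delta = ((0x5 << 16) - (0xffffffffffff << 16)) >> 16 = 6
 *
 * because the subtraction wraps modulo 2^64 and the arithmetic right
 * shift preserves the sign: six counts really occurred between the
 * two reads.
 */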
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}
static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}
static inline bool bts_available(void)
{
	return x86_pmu.enable_bts != NULL;
}
static inline void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}
static inline void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
static void release_bts_hardware(void)
{
	int cpu;

	if (!bts_available())
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			continue;

		per_cpu(cpu_hw_events, cpu).ds = NULL;

		kfree((void *)(unsigned long)ds->bts_buffer_base);
		kfree(ds);
	}

	put_online_cpus();
}
static int reserve_bts_hardware(void)
{
	int cpu, err = 0;

	if (!bts_available())
		return 0;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;

		err = -ENOMEM;
		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
		if (unlikely(!buffer))
			break;

		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
		if (unlikely(!ds)) {
			kfree(buffer);
			break;
		}

		ds->bts_buffer_base = (u64)(unsigned long)buffer;
		ds->bts_index = ds->bts_buffer_base;
		ds->bts_absolute_maximum =
			ds->bts_buffer_base + BTS_BUFFER_SIZE;
		ds->bts_interrupt_threshold =
			ds->bts_absolute_maximum - BTS_OVFL_TH;

		per_cpu(cpu_hw_events, cpu).ds = ds;
		err = 0;
	}

	if (err)
		release_bts_hardware();
	else {
		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return err;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_bts_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}
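/*
 * Example of the attr->config encoding decoded above: a request for
 * L1 data cache read misses arrives as
 *
 *	config = PERF_COUNT_HW_CACHE_L1D |
 *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 *
 * which indexes hw_cache_event_ids[L1D][OP_READ][RESULT_MISS], e.g.
 * 0x0140 (L1D_CACHE_LD.I_STATE) on Nehalem.
 */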
static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= X86_DEBUGCTL_TR;
	debugctlmsr |= X86_DEBUGCTL_BTS;
	debugctlmsr |= X86_DEBUGCTL_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}
static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}
/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_bts_hardware();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * Raw hw_event type provide the config in the hw_event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!bts_available())
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}
static void p6_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}
static void amd_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * events proper, so that amd_pmu_enable_event() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}
void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		cpuc->n_added = 0;

	x86_pmu.disable_all();
}
static void p6_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long val;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}
static void intel_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
static void amd_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}
static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}
static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	int i, j, w, num;
	int weight, wmax;
	unsigned long *c;
	unsigned long constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		x86_pmu.get_event_constraints(cpuc,
					      cpuc->event_list[i],
					      (u64 *)constraints[i]);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0, num = n; i < n; i++, num--) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
			 smp_processor_id(),
			 hwc->config,
			 hwc->idx,
			 assign ? 'y' : 'n');

		set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (!num)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_events;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_events_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			weight = bitmap_weight(c, X86_PMC_IDX_MAX);
			if (weight != w)
				continue;

			for_each_bit(j, c, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
				 smp_processor_id(),
				 hwc->config,
				 j,
				 assign ? 'y' : 'n');

			set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}
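/*
 * Example of why the weight ordering above matters: take one event
 * constrained to counter 0 only (weight 1, e.g. FP_COMP_OPS_EXE on
 * Core2) and one event that may use counters 0-1 (weight 2).
 * Placing the weight-1 event first guarantees it gets counter 0 and
 * the flexible event falls back to counter 1; the opposite order
 * could park the flexible event on counter 0 and leave the
 * constrained one unschedulable.
 */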
/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}
static inline void x86_assign_hw_event(struct perf_event *event,
				struct hw_perf_event *hwc, int idx)
{
	hwc->idx = idx;

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	 = 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base  = x86_pmu.perfctr;
	}
}
void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i;

	if (!x86_pmu_initialized())
		return;
	if (cpuc->n_added) {
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < cpuc->n_events; i++) {

			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
				continue;

			x86_pmu.disable(hwc, hwc->idx);

			clear_bit(hwc->idx, cpuc->active_mask);
			barrier();
			cpuc->events[hwc->idx] = NULL;

			x86_perf_event_update(event, hwc, hwc->idx);

			hwc->idx = -1;
		}

		for (i = 0; i < cpuc->n_events; i++) {

			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (hwc->idx == -1) {
				x86_assign_hw_event(event, hwc, cpuc->assign[i]);
				x86_perf_event_set_period(event, hwc, hwc->idx);
			}
			/*
			 * need to mark as active because x86_pmu_disable()
			 * clear active_mask and eventsp[] yet it preserves
			 * idx
			 */
			set_bit(hwc->idx, cpuc->active_mask);
			cpuc->events[hwc->idx] = event;

			x86_pmu.enable(hwc, hwc->idx);
			perf_event_update_userpage(event);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}
	x86_pmu.enable_all();
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	(void)checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
}
static inline void
intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
}
static void
p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val = P6_NOP_EVENT;

	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}
static inline void
intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_event(hwc, idx);
}
static inline void
amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	x86_pmu_disable_event(hwc, idx);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event,
			 struct hw_perf_event *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs dont like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extra future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->event_base + idx,
			     (u64)(-left) & x86_pmu.event_mask);

	perf_event_update_userpage(event);

	return ret;
}
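/*
 * Worked example of the (-left) programming above: with a sample
 * period of 1000 on a 48-bit counter the hardware is written with
 *
 *	(u64)(-1000) & ((1ULL << 48) - 1) = 0xfffffffffc18
 *
 * so the counter overflows, and raises the PMI, after exactly 1000
 * increments.
 */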
static inline void
intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
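/*
 * Layout note: MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit
 * control field per fixed counter, which is what the idx * 4 shifts
 * above select. For fixed counter 1, for example, bits 4-7 are
 * rewritten: 0x1 enables ring-0 counting, 0x2 ring-3 counting and
 * 0x8 the PMI on overflow.
 */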
static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	val = hwc->config;
	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}
static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__get_cpu_var(cpu_hw_events).enabled)
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_event(hwc, idx);
}
static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		x86_pmu_enable_event(hwc, idx);
}
/*
 * activate a single event
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 *
 * Called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	n = collect_events(cpuc, event, false);
	if (n < 0)
		return n;

	ret = x86_schedule_events(cpuc, n, assign);
	if (ret)
		return ret;
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->n_events = n;
	cpuc->n_added  = n - n0;

	if (hwc->sample_period)
		x86_perf_event_set_period(event, hwc, hwc->idx);

	return 0;
}
static void x86_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->events[hwc->idx] != event))
		return;

	x86_pmu.enable(hwc, hwc->idx);
}
void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr  + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}
static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return;

	if (!ds)
		return;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return;

	ds->bts_index = ds->bts_buffer_base;

	data.period	= event->hw.last_period;
	data.addr	= 0;
	data.raw	= NULL;
	regs.ip		= 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event,
			      header.size * (top - at), 1, 1))
		return;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
}
static void x86_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int i, idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the event:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a event
	 * that we are disabling:
	 */
	x86_perf_event_update(event, hwc, idx);

	/* Drain the remaining BTS records. */
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
		intel_pmu_drain_bts_buffer(cpuc);

	cpuc->events[idx] = NULL;

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_event_update(event, hwc, idx);
	ret = x86_perf_event_set_period(event, hwc, idx);

	if (event->state == PERF_EVENT_STATE_ACTIVE)
		intel_pmu_enable_event(hwc, idx);

	return ret;
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}
	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
static int p6_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event, hwc, idx);
		if (val & (1ULL << (x86_pmu.event_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled		= 1;
		data.period	= event->hw.last_period;

		if (!x86_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			p6_pmu_disable_event(hwc, idx);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 ack, status;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	perf_disable();
	intel_pmu_drain_bts_buffer(cpuc);
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		perf_enable();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			intel_pmu_disable_event(&event->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	perf_enable();

	return 1;
}
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event, hwc, idx);
		if (val & (1ULL << (x86_pmu.event_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled		= 1;
		data.period	= event->hw.last_period;

		if (!x86_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			amd_pmu_disable_event(hwc, idx);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_event_do_pending();
	irq_exit();
}
void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}
void perf_events_lapic_init(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
}
static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

#ifdef CONFIG_X86_LOCAL_APIC
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}
static struct event_constraint bts_constraint = {
	.code = 0,
	.cmask = 0,
	.idxmsk[0] = 1ULL << X86_PMC_IDX_FIXED_BTS
};
static int intel_special_constraints(struct perf_event *event,
				     u64 *idxmsk)
{
	unsigned int hw_event;

	hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;

	if (unlikely((hw_event ==
		      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
		     (event->hw.sample_period == 1))) {

		bitmap_copy((unsigned long *)idxmsk,
			    (unsigned long *)bts_constraint.idxmsk,
			    X86_PMC_IDX_MAX);
		return 1;
	}
	return 0;
}
static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event,
					u64 *idxmsk)
{
	const struct event_constraint *c;

	/*
	 * cleanup bitmask
	 */
	bitmap_zero((unsigned long *)idxmsk, X86_PMC_IDX_MAX);

	if (intel_special_constraints(event, idxmsk))
		return;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				bitmap_copy((unsigned long *)idxmsk,
					    (unsigned long *)c->idxmsk,
					    X86_PMC_IDX_MAX);
				return;
			}
		}
	}
	/* no constraints, means supports all generic counters */
	bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
}
static void amd_get_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event,
				      u64 *idxmsk)
{
	/* no constraints, means supports all generic counters */
	bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
}
static int x86_event_sched_in(struct perf_event *event,
			  struct perf_cpu_context *cpuctx, int cpu)
{
	int ret = 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = cpu;
	event->tstamp_running += event->ctx->time - event->tstamp_stopped;

	if (!is_x86_event(event))
		ret = event->pmu->enable(event);

	if (!ret && !is_software_event(event))
		cpuctx->active_oncpu++;

	if (!ret && event->attr.exclusive)
		cpuctx->exclusive = 1;

	return ret;
}
static void x86_event_sched_out(struct perf_event *event,
			    struct perf_cpu_context *cpuctx, int cpu)
{
	event->state = PERF_EVENT_STATE_INACTIVE;
	event->oncpu = -1;

	if (!is_x86_event(event))
		event->pmu->disable(event);

	event->tstamp_running -= event->ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;

	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}
/*
 * Called to enable a whole group of events.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 *
 * called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
int hw_perf_group_sched_in(struct perf_event *leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx, int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct perf_event *sub;
	int assign[X86_PMC_IDX_MAX];
	int n0, n1, ret;

	/* n0 = total number of events */
	n0 = collect_events(cpuc, leader, true);
	if (n0 < 0)
		return n0;

	ret = x86_schedule_events(cpuc, n0, assign);
	if (ret)
		return ret;

	ret = x86_event_sched_in(leader, cpuctx, cpu);
	if (ret)
		return ret;

	n1 = 1;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (sub->state > PERF_EVENT_STATE_OFF) {
			ret = x86_event_sched_in(sub, cpuctx, cpu);
			if (ret)
				goto undo;
			++n1;
		}
	}
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n0*sizeof(int));

	cpuc->n_events  = n0;
	cpuc->n_added   = n1;
	ctx->nr_active += n1;

	/*
	 * 1 means successful and events are active
	 * This is not quite true because we defer
	 * actual activation until hw_perf_enable() but
	 * this way we ensure caller won't try to enable
	 * individual events
	 */
	return 1;
undo:
	x86_event_sched_out(leader, cpuctx, cpu);
	n0 = 1;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
			x86_event_sched_out(sub, cpuctx, cpu);
			if (++n0 == n1)
				break;
		}
	}
	return ret;
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
	.next			= NULL,
	.priority		= 1
};
static __initconst struct x86_pmu p6_pmu = {
	.name			= "p6",
	.handle_irq		= p6_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
	.eventsel		= MSR_P6_EVNTSEL0,
	.perfctr		= MSR_P6_PERFCTR0,
	.event_map		= p6_pmu_event_map,
	.raw_event		= p6_pmu_raw_event,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.apic			= 1,
	.max_period		= (1ULL << 31) - 1,
	.version		= 0,
	.num_events		= 2,
	/*
	 * Events have 40 bits implemented. However they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such the
	 * effective width of a event for P6-like PMU is 32 bits only.
	 *
	 * See IA-32 Intel Architecture Software developer manual Vol 3B
	 */
	.event_bits		= 32,
	.event_mask		= (1ULL << 32) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.event_constraints	= intel_p6_event_constraints
};
static __initconst struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.enable_bts		= intel_pmu_enable_bts,
	.disable_bts		= intel_pmu_disable_bts,
	.get_event_constraints	= intel_get_event_constraints
};
static __initconst struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= amd_pmu_handle_irq,
	.disable_all		= amd_pmu_disable_all,
	.enable_all		= amd_pmu_enable_all,
	.enable			= amd_pmu_enable_event,
	.disable		= amd_pmu_disable_event,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.raw_event		= amd_pmu_raw_event,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_events		= 4,
	.event_bits		= 48,
	.event_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints
};
static __init int p6_pmu_init(void)
{
	switch (boot_cpu_data.x86_model) {
	case 1:
	case 3:  /* Pentium Pro */
	case 5:
	case 6:  /* Pentium II */
	case 7:
	case 8:
	case 11: /* Pentium III */
	case 9:
	case 13:
		/* Pentium M */
		break;
	default:
		pr_cont("unsupported p6 CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	x86_pmu = p6_pmu;

	return 0;
}
static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		/* check for P6 processor family */
		if (boot_cpu_data.x86 == 6) {
			return p6_pmu_init();
		} else {
			return -ENODEV;
		}
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		return -ENODEV;

	x86_pmu				= intel_pmu;
	x86_pmu.version			= version;
	x86_pmu.num_events		= eax.split.num_events;
	x86_pmu.event_bits		= eax.split.bit_width;
	x86_pmu.event_mask		= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	x86_pmu.num_events_fixed	= max((int)edx.split.num_events_fixed, 3);

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_core_event_constraints;
		pr_cont("Core2 events, ");
		break;
	case 26:
	case 30:
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		pr_cont("Nehalem/Corei7 events, ");
		break;
	case 28:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("Atom events, ");
		break;
	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}
static __init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}
static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}
void __init init_hw_perf_events(void)
{
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pmu_check_apic();

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
	}
	perf_event_mask = (1 << x86_pmu.num_events) - 1;
	perf_max_events = x86_pmu.num_events;

	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
	}

	perf_event_mask |=
		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
	x86_pmu.intel_ctrl = perf_event_mask;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	pr_info("... version:                 %d\n",     x86_pmu.version);
	pr_info("... bit width:               %d\n",     x86_pmu.event_bits);
	pr_info("... generic registers:       %d\n",     x86_pmu.num_events);
	pr_info("... value mask:              %016Lx\n", x86_pmu.event_mask);
	pr_info("... max period:              %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events:    %d\n",     x86_pmu.num_events_fixed);
	pr_info("... event mask:              %016Lx\n", perf_event_mask);
}
static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event, &event->hw, event->hw.idx);
}
static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};
/*
 * validate a single event group
 *
 * validation include:
 *	- check events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret, n;

	ret = -ENOMEM;
	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		goto out;

	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	ret = -ENOSPC;
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;

	ret = x86_schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
out:
	return ret;
}
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	const struct pmu *tmp;
	int err;

	err = __hw_perf_event_init(event);
	if (!err) {
		/*
		 * we temporarily connect event to its pmu
		 * such that validate_group() can classify
		 * it as an x86 event using is_x86_event()
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
		return ERR_PTR(err);
	}

	return &pmu;
}
/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	.walk_stack		= print_context_stack_bp,
};
#include "../dumpstack.h"
static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}
/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	unsigned long bytes;

	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return bytes == sizeof(*frame);
}
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	fp = (void __user *)regs->bp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->ip);

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		frame.next_frame     = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}
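/*
 * Note on the walk above: with frame pointers, each user stack frame
 * begins with the saved frame pointer followed by the return
 * address, which is exactly the struct stack_frame layout
 * (next_frame, return_address). The fp < regs->sp check stops the
 * walk once a "frame" no longer points further up the stack, which
 * catches corrupt or absent frame pointers.
 */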
static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(pmc_nmi_entry);
	else
		entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}
void hw_perf_event_setup_online(int cpu)
{
	init_debug_store_on_cpu(cpu);
}