/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>

#include "perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
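/*
 * Each entry above is a raw event-select/umask encoding: bits 0-7 are the
 * event select and bits 8-15 the unit mask, e.g. 0x412e is event 0x2e
 * (LLC references) qualified by umask 0x41 (misses).
 */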
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	EVENT_EXTRA_END
};
static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	EVENT_EXTRA_END
};
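/*
 * Note the wider valid-bit mask (0x3fffffffff) here: the SNB OFFCORE_RSP
 * MSRs accept many more request/response/snoop qualifier bits than the
 * 16-bit mask used for Nehalem/Westmere above.
 */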
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
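/*
 * Example composition: a demand-read LLC access is encoded as
 * NHM_DMND_READ|NHM_L3_ACCESS and a demand-read LLC miss as
 * NHM_DMND_READ|NHM_L3_MISS; the value goes into the OFFCORE_RSP extra
 * register selected by the OFFCORE_RESPONSE events (0x01b7/0x01bb) used
 * in the tables below.
 */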
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		return true;

	return false;
}
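/*
 * Note: for precise_ip > 1 the PEBS fixup walks the LBR stack to correct
 * the reported instruction pointer for skid, which is why LBR is enabled
 * here even when the user did not ask for branch sampling.
 */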
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
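/*
 * The workaround only needs to run when new counters were scheduled in
 * (added != 0); re-enabling an unchanged configuration is unaffected by
 * the erratum.
 */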
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);

	/*
	 * must disable before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
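/*
 * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL,
 * so for idx 1 the mask above is 0xf0 and the enable/USR/OS bits are
 * shifted into bits 4-7.
 */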
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}
	/*
	 * must be enabled before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
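/*
 * Note that the PMU stays globally disabled (via intel_pmu_disable_all)
 * for the whole overflow walk above and is only re-enabled at "done:",
 * so counters do not advance while their state is being updated.
 */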
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
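/*
 * The two encodings are interchangeable: 0x01b7 pairs with
 * MSR_OFFCORE_RSP_0 and 0x01bb with MSR_OFFCORE_RSP_1, so an event can be
 * moved to whichever extra register is still free.
 */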
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraint() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If its a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling(). reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put constraint if extra reg was actually allocated. Also takes
	 * care of event which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}
static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
			return c;
	}
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
			c = d;
		}
	}
	return c;
}
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);
}
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retires
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
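/*
 * With the bit layout used by X86_CONFIG this alternative encoding works
 * out to the raw config 0x108000c0 (event 0xc0, INV set, CMASK = 16) plus
 * whatever non-event bits the original config carried.
 */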
static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retires
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
		x86_pmu.pebs_aliases(event);

	if (intel_pmu_needs_lbr_smpl(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;

	*nr = 1;
	return arr;
}
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}
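/*
 * Unlike intel_guest_get_msrs() above, this variant switches each
 * event-select MSR individually: the original Core PMU (arch perfmon v1)
 * has no MSR_CORE_PERF_GLOBAL_CTRL to mask guest/host counters in one go.
 */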
static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}

static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
				cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
};
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}
	return regs;
}
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	cpuc->lbr_sel = NULL;

	if (!cpuc->shared_regs)
		return;

	if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
		for_each_cpu(i, topology_thread_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				cpuc->kfree_on_online = cpuc->shared_regs;
				cpuc->shared_regs = pc;
				break;
			}
		}
		cpuc->shared_regs->core_id = core_id;
		cpuc->shared_regs->refcnt++;
	}

	if (x86_pmu.lbr_sel_map)
		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
}
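/*
 * When HT sharing is allowed, sibling threads of one core end up pointing
 * at a single refcounted intel_shared_regs; the duplicate allocated in
 * intel_pmu_cpu_prepare() is handed to kfree_on_online for disposal.
 */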
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}
static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * Intel LBR does not tag entries with the
	 * PID of the current task, so we need to
	 * flush it on context switch.
	 * For now, we simply reset it.
	 */
	if (x86_pmu.lbr_nr)
		intel_pmu_lbr_reset();
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	NULL,
};
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.pebs_aliases		= intel_pebs_aliases_core2,

	.format_attrs		= intel_arch3_formats_attr,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.flush_branch_stack	= intel_pmu_flush_branch_stack,
};
static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}
static int intel_snb_pebs_broken(int cpu)
{
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
	case 42: /* SNB */
		rev = 0x28;
		break;

	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}
static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock..
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}
static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};
static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that CPUID reports as not present */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
	case 45: /* SandyBridge-EP, "Romley-EP" */
		x86_add_quirk(intel_sandybridge_quirk);
	case 58: /* IvyBridge */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}

	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

	if (x86_pmu.event_constraints) {
		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != X86_RAW_EVENT_MASK
			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				continue;
			}

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;