/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>

#include "perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
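/*
 * Each entry above uses the architectural PERFEVTSEL encoding exposed through
 * the PMU_FORMAT_ATTR definitions further down (event = config:0-7,
 * umask = config:8-15).  For example 0x412e is event select 0x2e with
 * umask 0x41, the architectural LLC-misses event backing
 * PERF_COUNT_HW_CACHE_MISSES.
 */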
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	/*
	 * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
	 * siblings; disable these events because they can corrupt unrelated
	 * counters.
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads,  mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,  mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
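/*
 * These aliases are exported through sysfs (in the cpu PMU's "events"
 * directory) via the attribute arrays below, so tooling can refer to the
 * symbolic name instead of the raw encoding -- e.g. something along the
 * lines of "perf record -e cpu/mem-loads/" rather than spelling out
 * event/umask/ldlat by hand.
 */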
struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};
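/*
 * The generic cache-event tables that follow are indexed as
 * [cache type][operation][access/miss].  By the conventions used in this
 * file a value of -1 marks a combination that is not supported, 0 marks one
 * with no suitable hardware event, and 0x01b7 entries are OFFCORE_RESPONSE
 * events whose actual selection comes from the extra_regs tables above.
 */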
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
	EVENT_EXTRA_END
};

#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)
static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_READ|SLM_LLC_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};
static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
	    x86_pmu.intel_cap.pebs_format < 2)
		return true;

	return false;
}
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
	       x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of ERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static inline bool event_is_checkpointed(struct perf_event *event)
{
	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
	cpuc->intel_cp_status &= ~(1ull << hwc->idx);

	/*
	 * must disable before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
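/*
 * Note: each fixed-function counter owns a 4-bit field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (bit 0: ring-0 enable, bit 1: ring-3
 * enable, bit 2: ANY on v3+, bit 3: PMI), which is why both the enable and
 * disable paths above mask with 0xfULL << (idx * 4).
 */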
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	/*
	 * must be enabled before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(event_is_checkpointed(event)))
		cpuc->intel_cp_status |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	/*
	 * For a checkpointed counter always reset back to 0. This
	 * avoids a situation where the counter overflows, aborts the
	 * transaction and is then set back to shortly before the
	 * overflow, and overflows and aborts again.
	 */
	if (unlikely(event_is_checkpointed(event))) {
		/* No race with NMIs because the counter should not be armed */
		wrmsrl(event->hw.event_base, 0);
		local64_set(&event->hw.prev_count, 0);
	}
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * No known reason to not always do late ACK,
	 * but just in case do it opt-in.
	 */
	if (!x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status)
		goto done;

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		static bool warned = false;

		if (!warned) {
			WARN(1, "perfevents: irq loop stuck!\n");
			perf_event_print_debug();
			warned = true;
		}
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
	 * and clear the bit.
	 */
	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
		if (!status)
			goto done;
	}

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	/*
	 * Checkpointed counters can lead to 'spurious' PMIs because the
	 * rollback caused by the PMI will have cleared the overflow status
	 * bit. Therefore always force probe these counters.
	 */
	status |= cpuc->intel_cp_status;

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	/*
	 * Only unmask the NMI after the overflow counters
	 * have been reset. This avoids spurious NMIs on
	 * Haswell CPUs.
	 */
	if (x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);

	return handled;
}
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}

static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraint() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If its a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling(). reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put constraint if extra reg was actually allocated. Also takes
	 * care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}
static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
			return c;
	}
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
			c = d;
		}
	}
	return c;
}

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &unconstrained;
}
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}

static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);
}
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retires
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}

static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retires
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
		x86_pmu.pebs_aliases(event);

	if (intel_pmu_needs_lbr_smpl(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}

struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
	/*
	 * If PMU counter has PEBS enabled it is not enough to disable counter
	 * on a guest entry since PEBS memory write can overshoot guest entry
	 * and corrupt guest memory. Disabling PEBS solves the problem.
	 */
	arr[1].msr = MSR_IA32_PEBS_ENABLE;
	arr[1].host = cpuc->pebs_enabled;
	arr[1].guest = 0;

	*nr = 2;
	return arr;
}

static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}
static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}

static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
		    cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}
static int hsw_hw_config(struct perf_event *event)
{
	int ret = intel_pmu_hw_config(event);

	if (ret)
		return ret;
	if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
		return 0;
	event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);

	/*
	 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
	 * PEBS or in ANY thread mode. Since the results are non-sensical forbid
	 * this combination.
	 */
	if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
	     ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
	      event->attr.precise_ip > 0))
		return -EOPNOTSUPP;

	if (event_is_checkpointed(event)) {
		/*
		 * Sampling of checkpointed events can cause situations where
		 * the CPU constantly aborts because of an overflow, which is
		 * then checkpointed back and ignored. Forbid checkpointing
		 * for sampling.
		 *
		 * But still allow a long sampling period, so that perf stat
		 * from KVM works.
		 */
		if (event->attr.sample_period > 0 &&
		    event->attr.sample_period < 0x7fffffff)
			return -EOPNOTSUPP;
	}
	return 0;
}
static struct event_constraint counter2_constraint =
			EVENT_CONSTRAINT(0, 0x4, 0);
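/*
 * The 0x4 counter mask means "generic counter 2 only": the checkpointed
 * in_tx_cp events are restricted to PMC2 by the quirk handled below.
 */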
static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c = intel_get_event_constraints(cpuc, event);

	/* Handle special quirk on in_tx_checkpointed only in counter 2 */
	if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
		if (c->idxmsk64 & (1U << 2))
			return &counter2_constraint;
		return &emptyconstraint;
	}

	return c;
}
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
PMU_FORMAT_ATTR(in_tx,	"config:32");
PMU_FORMAT_ATTR(in_tx_cp, "config:33");

static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};

ssize_t intel_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
}
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,
};
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}
	return regs;
}

static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	cpuc->lbr_sel = NULL;

	if (!cpuc->shared_regs)
		return;

	if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
		for_each_cpu(i, topology_thread_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				cpuc->kfree_on_online = cpuc->shared_regs;
				cpuc->shared_regs = pc;
				break;
			}
		}
		cpuc->shared_regs->core_id = core_id;
		cpuc->shared_regs->refcnt++;
	}

	if (x86_pmu.lbr_sel_map)
		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
}

static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}

static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * Intel LBR does not tag entries with the
	 * PID of the current task, then we need to
	 * flush it on ctxsw.
	 * For now, we simply reset it
	 */
	intel_pmu_lbr_reset();
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

PMU_FORMAT_ATTR(ldlat, "config1:0-15");
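/*
 * Both extra fields live in config1 of the raw event: offcore_rsp supplies
 * the OFFCORE_RESPONSE MSR value and ldlat the PEBS load-latency threshold.
 * They are exported under the cpu PMU's sysfs "format" directory, so a raw
 * event can be requested from the perf tool roughly as
 * cpu/event=0xcd,umask=0x1,ldlat=3/.
 */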
static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	&format_attr_in_tx.attr,
	&format_attr_in_tx_cp.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	&format_attr_ldlat.attr, /* PEBS load latency */
	NULL,
};
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.pebs_aliases		= intel_pebs_aliases_core2,

	.format_attrs		= intel_arch3_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.flush_branch_stack	= intel_pmu_flush_branch_stack,
};
static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}
static int intel_snb_pebs_broken(int cpu)
{
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
		break;
	}

	return (cpu_data(cpu).microcode < rev);
}
static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock..
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}
/*
 * Under certain circumstances, accessing certain MSRs may cause #GP.
 * This function tests whether the input MSR can be safely accessed.
 */
static bool check_msr(unsigned long msr, u64 mask)
{
	u64 val_old, val_new, val_tmp;

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches. This is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	if (rdmsrl_safe(msr, &val_old))
		return false;

	/*
	 * Only change the bits which can be updated by wrmsrl.
	 */
	val_tmp = val_old ^ mask;
	if (wrmsrl_safe(msr, val_tmp) ||
	    rdmsrl_safe(msr, &val_new))
		return false;

	if (val_new != val_tmp)
		return false;

	/*
	 * At this point the MSR is known to be safely accessible.
	 * Restore the old value and return.
	 */
	wrmsrl(msr, val_old);

	return true;
}
static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};
static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events reported as not present by cpuid */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work around it by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
EVENT_ATTR_STR(mem-loads,	mem_ld_hsw,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_hsw,	"event=0xd0,umask=0x82");
/* Haswell special events */
EVENT_ATTR_STR(tx-start,	tx_start,	"event=0xc9,umask=0x1");
EVENT_ATTR_STR(tx-commit,	tx_commit,	"event=0xc9,umask=0x2");
EVENT_ATTR_STR(tx-abort,	tx_abort,	"event=0xc9,umask=0x4");
EVENT_ATTR_STR(tx-capacity,	tx_capacity,	"event=0x54,umask=0x2");
EVENT_ATTR_STR(tx-conflict,	tx_conflict,	"event=0x54,umask=0x1");
EVENT_ATTR_STR(el-start,	el_start,	"event=0xc8,umask=0x1");
EVENT_ATTR_STR(el-commit,	el_commit,	"event=0xc8,umask=0x2");
EVENT_ATTR_STR(el-abort,	el_abort,	"event=0xc8,umask=0x4");
EVENT_ATTR_STR(el-capacity,	el_capacity,	"event=0x54,umask=0x2");
EVENT_ATTR_STR(el-conflict,	el_conflict,	"event=0x54,umask=0x1");
EVENT_ATTR_STR(cycles-t,	cycles_t,	"event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct,	cycles_ct,	"event=0x3c,in_tx=1,in_tx_cp=1");
static struct attribute *hsw_events_attrs[] = {
	EVENT_PTR(tx_start),
	EVENT_PTR(tx_commit),
	EVENT_PTR(tx_abort),
	EVENT_PTR(tx_capacity),
	EVENT_PTR(tx_conflict),
	EVENT_PTR(el_start),
	EVENT_PTR(el_commit),
	EVENT_PTR(el_abort),
	EVENT_PTR(el_capacity),
	EVENT_PTR(el_conflict),
	EVENT_PTR(cycles_t),
	EVENT_PTR(cycles_ct),
	EVENT_PTR(mem_ld_hsw),
	EVENT_PTR(mem_st_hsw),
	NULL
};
__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	struct extra_reg *er;
	int version, i;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}
	/*
	 * Check whether the Architectural PerfMon supports
	 * the Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	x86_pmu = intel_pmu;

	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;
	x86_pmu.cntval_bits = eax.split.bit_width;
	x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl = ebx.full;
	x86_pmu.events_mask_len = eax.split.mask_length;

	x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	if (boot_cpu_has(X86_FEATURE_PDCM)) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65nm Core "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* 65nm Core2 "Merom" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* 65nm Core2 "Merom-L" */
	case 23: /* 45nm Core2 "Penryn" */
	case 29: /* 45nm Core2 "Dunnington" (MP) */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;
	case 30: /* 45nm Nehalem */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;
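	/*
	 * For reference, X86_CONFIG() above just packs the standard
	 * ARCH_PERFMON event-select fields (event code in bits 0-7, umask in
	 * bits 8-15, invert in bit 23, counter mask in bits 24-31), so the
	 * frontend-stall mapping corresponds to the raw config value
	 * 0x0180010e. The same encoding could be handed to perf directly as
	 * -e cpu/event=0x0e,umask=0x01,inv=1,cmask=1/ if needed.
	 */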
	case 28: /* 45nm Atom "Pineview" */
	case 38: /* 45nm Atom "Lincroft" */
	case 39: /* 32nm Atom "Penwell" */
	case 53: /* 32nm Atom "Cloverview" */
	case 54: /* 32nm Atom "Cedarview" */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;
	case 55: /* 22nm Atom "Silvermont" */
	case 76: /* 14nm Atom "Airmont" */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		pr_cont("Silvermont events, ");
		break;
	case 37: /* 32nm Westmere */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;
	case 42: /* 32nm SandyBridge */
	case 45: /* 32nm SandyBridge-E/EN/EP */
		x86_add_quirk(intel_sandybridge_quirk);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 45)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;
	case 58: /* 22nm IvyBridge */
	case 62: /* 22nm IvyBridge-EP/EX */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB is different from SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 62)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;
	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.lbr_double_abort = true;
		pr_cont("Haswell events, ");
		break;
	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}
	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}

	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
	if (x86_pmu.event_constraints) {
		/*
		 * The event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend its mask to the generic counters.
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != FIXED_EVENT_FLAGS
			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				continue;
			}

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;
		}
	}
	/*
	 * Accessing LBR MSRs may cause a #GP under certain circumstances,
	 * e.g. KVM doesn't support the LBR MSRs. Check all LBR MSRs here
	 * and disable LBR access if any of them cannot be accessed.
	 */
	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}
	/*
	 * Accessing the extra registers may cause a #GP under certain
	 * circumstances, e.g. KVM doesn't support offcore events.
	 * Check all extra_regs here.
	 */
	if (x86_pmu.extra_regs) {
		for (er = x86_pmu.extra_regs; er->msr; er++) {
			er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
			/* Disable LBR select mapping */
			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
				x86_pmu.lbr_sel_map = NULL;
		}
	}
	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");