/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>

#include "perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
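/*
 * Illustrative example (not part of the original file): the table above maps
 * the generic PERF_COUNT_HW_CPU_CYCLES id to the architectural UnHalted Core
 * Cycles encoding, i.e. event=0x3c, umask=0x00, which could also be requested
 * from user space as a raw event:
 *
 *	perf stat -e r003c -- sleep 1
 */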
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
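/*
 * Illustrative note (not part of the original file): the second argument of
 * INTEL_EVENT_CONSTRAINT() is a counter bitmask, so the 0x11/0x2 entry above
 * means FP_ASSIST may only be scheduled on general-purpose counter 1, while
 * a mask of 0x3 would allow either counter 0 or counter 1.
 */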
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	/*
	 * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
	 * siblings; disable these events because they can corrupt unrelated
	 * counters.
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
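/*
 * Illustrative example (not part of the original file): OFFCORE_RESPONSE
 * events take their request/response filter from an extra MSR, which perf
 * exposes as "config1" (see the offcore_rsp format attribute further down).
 * A raw off-core event could thus be requested from user space as:
 *
 *	perf stat -e cpu/event=0xb7,umask=0x01,offcore_rsp=0x4011/ ...
 *
 * where 0x4011 is just an example bitmask selecting which request and
 * response types to count.
 */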
static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");

struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};
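/*
 * Illustrative example (not part of the original file): the EVENT_ATTR_STR()
 * strings above become named event aliases under
 * /sys/bus/event_source/devices/cpu/events/, so on a matching CPU the load
 * latency event can be requested by name instead of by raw encoding:
 *
 *	perf record -e cpu/mem-loads/pp ...
 */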
static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */
	EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
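/*
 * Worked example (illustrative, not part of the original file): with the
 * definitions above, SNB_DMND_READ|SNB_L3_ACCESS expands to
 * SNB_DMND_DATA_RD|SNB_LLC_DATA_RD|SNB_RESP_ANY, i.e. bits 0, 7 and 16, or
 * 0x10081 as the raw MSR_OFFCORE_RSP_x value used for the LL read-access
 * entry in snb_hw_cache_extra_regs below.
 */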
static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
		[ C(RESULT_ACCESS) ] = 0x1b7,
		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
		   L3_MISS|ANY_SNOOP */
		[ C(RESULT_MISS)   ] = 0x1b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE:ALL_RFO */
		/* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
		[ C(RESULT_MISS)   ] = 0x1b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
		[ C(RESULT_ACCESS) ] = 0x2d5,
		/* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
		   L3_MISS|ANY_SNOOP */
		[ C(RESULT_MISS)   ] = 0x3fbc0202d5ull,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x122, /* OFFCORE_RESPONSE:ALL_RFO */
		/* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
		[ C(RESULT_MISS)   ] = 0x3fbc020122ull,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */
#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
	EVENT_EXTRA_END
};
#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)
static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_READ|SLM_LLC_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};
static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
	    x86_pmu.intel_cap.pebs_format < 2)
		return true;

	return false;
}
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 *
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}
static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}
static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
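/*
 * Illustrative note (not part of the original file): the fixed-counter
 * control MSR packs one 4-bit control field per fixed counter, hence the
 * 0xf << (idx * 4) mask above. For fixed counter 1 (CPU_CLK_UNHALTED.CORE)
 * the field occupies bits 4-7, so disabling it clears those four bits while
 * leaving the other fixed counters untouched.
 */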
static inline bool event_is_checkpointed(struct perf_event *event)
{
	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
	cpuc->intel_cp_status &= ~(1ull << hwc->idx);

	/*
	 * must disable before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	/*
	 * must be enabled before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(event_is_checkpointed(event)))
		cpuc->intel_cp_status |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	/*
	 * For a checkpointed counter always reset back to 0. This
	 * avoids a situation where the counter overflows, aborts the
	 * transaction and is then set back to shortly before the
	 * overflow, and overflows and aborts again.
	 */
	if (unlikely(event_is_checkpointed(event))) {
		/* No race with NMIs because the counter should not be armed */
		wrmsrl(event->hw.event_base, 0);
		local64_set(&event->hw.prev_count, 0);
	}
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * No known reason to not always do late ACK,
	 * but just in case do it opt-in.
	 */
	if (!x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status)
		goto done;

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		static bool warned = false;
		if (!warned) {
			WARN(1, "perfevents: irq loop stuck!\n");
			perf_event_print_debug();
			warned = true;
		}
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
	 * and clear the bit.
	 */
	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
		if (!status)
			goto done;
	}

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	/*
	 * Checkpointed counters can lead to 'spurious' PMIs because the
	 * rollback caused by the PMI will have cleared the overflow status
	 * bit. Therefore always force probe these counters.
	 */
	status |= cpuc->intel_cp_status;

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	/*
	 * Only unmask the NMI after the overflow counters
	 * have been reset. This avoids spurious NMIs on
	 * Haswell CPUs.
	 */
	if (x86_pmu.late_ack)
		apic_write(APIC_LVTPC, APIC_DM_NMI);

	return handled;
}
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraint() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If it's a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling(). reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * Also takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}
1727 /* allocate again next time */
1731 static struct event_constraint
*
1732 intel_shared_regs_constraints(struct cpu_hw_events
*cpuc
,
1733 struct perf_event
*event
)
1735 struct event_constraint
*c
= NULL
, *d
;
1736 struct hw_perf_event_extra
*xreg
, *breg
;
1738 xreg
= &event
->hw
.extra_reg
;
1739 if (xreg
->idx
!= EXTRA_REG_NONE
) {
1740 c
= __intel_shared_reg_get_constraints(cpuc
, event
, xreg
);
1741 if (c
== &emptyconstraint
)
1744 breg
= &event
->hw
.branch_reg
;
1745 if (breg
->idx
!= EXTRA_REG_NONE
) {
1746 d
= __intel_shared_reg_get_constraints(cpuc
, event
, breg
);
1747 if (d
== &emptyconstraint
) {
1748 __intel_shared_reg_put_constraints(cpuc
, xreg
);
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &unconstrained;
}
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);
}
static void intel_pebs_aliases_core2(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retires
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
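/*
 * Worked example (illustrative, not part of the original file): with the
 * config layout exposed by the PMU_FORMAT_ATTR() entries further down
 * (event in bits 0-7, inv in bit 23, cmask in bits 24-31),
 * X86_CONFIG(.event=0xc0, .inv=1, .cmask=16) evaluates to 0x108000c0, which
 * is the raw encoding substituted for the 0x003c cycles event above.
 */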
static void intel_pebs_aliases_snb(struct perf_event *event)
{
	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retires
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
		x86_pmu.pebs_aliases(event);

	if (intel_pmu_needs_lbr_smpl(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
	/*
	 * If PMU counter has PEBS enabled it is not enough to disable counter
	 * on a guest entry since PEBS memory write can overshoot guest entry
	 * and corrupt guest memory. Disabling PEBS solves the problem.
	 */
	arr[1].msr = MSR_IA32_PEBS_ENABLE;
	arr[1].host = cpuc->pebs_enabled;
	arr[1].guest = 0;

	*nr = 2;
	return arr;
}
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}
static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}
static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
				cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}
static int hsw_hw_config(struct perf_event *event)
{
	int ret = intel_pmu_hw_config(event);

	if (ret)
		return ret;
	if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
		return 0;
	event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);

	/*
	 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
	 * PEBS or in ANY thread mode. Since the results are non-sensical forbid
	 * this combination.
	 */
	if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
	     ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
	      event->attr.precise_ip > 0))
		return -EOPNOTSUPP;

	if (event_is_checkpointed(event)) {
		/*
		 * Sampling of checkpointed events can cause situations where
		 * the CPU constantly aborts because of an overflow, which is
		 * then checkpointed back and ignored. Forbid checkpointing
		 * for sampling.
		 *
		 * But still allow a long sampling period, so that perf stat
		 * from KVM works.
		 */
		if (event->attr.sample_period > 0 &&
		    event->attr.sample_period < 0x7fffffff)
			return -EOPNOTSUPP;
	}
	return 0;
}
static struct event_constraint counter2_constraint =
			EVENT_CONSTRAINT(0, 0x4, 0);
static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c = intel_get_event_constraints(cpuc, event);

	/* Handle special quirk on in_tx_checkpointed only in counter 2 */
	if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
		if (c->idxmsk64 & (1U << 2))
			return &counter2_constraint;
		return &emptyconstraint;
	}

	return c;
}
PMU_FORMAT_ATTR(event,	"config:0-7"	);
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(pc,	"config:19"	);
PMU_FORMAT_ATTR(any,	"config:21"	); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);
PMU_FORMAT_ATTR(in_tx,  "config:32");
PMU_FORMAT_ATTR(in_tx_cp, "config:33");
static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
ssize_t intel_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
}
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,
};
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}
	return regs;
}
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
)
2127 struct cpu_hw_events
*cpuc
= &per_cpu(cpu_hw_events
, cpu
);
2128 int core_id
= topology_core_id(cpu
);
2131 init_debug_store_on_cpu(cpu
);
2133 * Deal with CPUs that don't clear their LBRs on power-up.
2135 intel_pmu_lbr_reset();
2137 cpuc
->lbr_sel
= NULL
;
2139 if (!cpuc
->shared_regs
)
2142 if (!(x86_pmu
.er_flags
& ERF_NO_HT_SHARING
)) {
2143 for_each_cpu(i
, topology_thread_cpumask(cpu
)) {
2144 struct intel_shared_regs
*pc
;
2146 pc
= per_cpu(cpu_hw_events
, i
).shared_regs
;
2147 if (pc
&& pc
->core_id
== core_id
) {
2148 cpuc
->kfree_on_online
= cpuc
->shared_regs
;
2149 cpuc
->shared_regs
= pc
;
2153 cpuc
->shared_regs
->core_id
= core_id
;
2154 cpuc
->shared_regs
->refcnt
++;
2157 if (x86_pmu
.lbr_sel_map
)
2158 cpuc
->lbr_sel
= &cpuc
->shared_regs
->regs
[EXTRA_REG_LBR
];
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}
static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * Intel LBR does not tag entries with the
	 * PID of the current task, so we need to
	 * flush it on ctxsw.
	 * For now, we simply reset it.
	 */
	intel_pmu_lbr_reset();
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

PMU_FORMAT_ATTR(ldlat, "config1:0-15");
static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	&format_attr_in_tx.attr,
	&format_attr_in_tx_cp.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	&format_attr_ldlat.attr, /* PEBS load latency */
	NULL,
};
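
/*
 * Example usage of the config1 fields above: offcore_rsp carries the
 * 64-bit OFFCORE_RESPONSE MSR value and ldlat the PEBS load-latency
 * threshold, so a load-latency style event could be requested roughly as
 *
 *	perf stat -e cpu/event=0xcd,umask=0x1,ldlat=30/ ...
 *
 * (example values only; compare the mem-loads alias defined further down).
 */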
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.pebs_aliases		= intel_pebs_aliases_core2,

	.format_attrs		= intel_arch3_formats_attr,
	.events_sysfs_show	= intel_event_sysfs_show,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.flush_branch_stack	= intel_pmu_flush_branch_stack,
};
static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}
static int intel_snb_pebs_broken(int cpu)
{
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
	case 42: /* SNB */
		rev = 0x28;
		break;

	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}
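
/*
 * In other words: for the steppings listed above, the running microcode
 * revision is compared against the first revision known to fix the PEBS
 * erratum; anything older, or any unlisted model/stepping (where rev
 * stays UINT_MAX), is treated as broken.
 */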
static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}
	put_online_cpus();

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock..
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}
/*
 * Under certain circumstances, accessing certain MSRs may cause a #GP.
 * This function tests whether the input MSR can be safely accessed.
 */
static bool check_msr(unsigned long msr, u64 mask)
{
	u64 val_old, val_new, val_tmp;

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	if (rdmsrl_safe(msr, &val_old))
		return false;

	/*
	 * Only change the bits which can be updated by wrmsrl.
	 */
	val_tmp = val_old ^ mask;
	if (wrmsrl_safe(msr, val_tmp) ||
	    rdmsrl_safe(msr, &val_new))
		return false;

	if (val_new != val_tmp)
		return false;

	/*
	 * Here it's sure that the MSR can be safely accessed.
	 * Restore the old value and return.
	 */
	wrmsrl(msr, val_old);

	return true;
}
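
/*
 * For illustration, a caller typically probes an MSR with only harmless
 * bits set in the mask, e.g. something along the lines of:
 *
 *	if (!check_msr(x86_pmu.lbr_tos, 0x3UL))
 *		...disable the feature that needs that MSR...
 *
 * which is how the LBR and extra-reg MSRs are validated near the end of
 * intel_pmu_init() below.
 */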
static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};
static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that are reported as not present by cpuid */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
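
/*
 * Background: CPUID leaf 0xa reports, in EBX, one bit per architectural
 * event that is *not* available on this part.  x86_pmu.events_mask is
 * filled from that register in intel_pmu_init(), so each set bit handled
 * above maps an unavailable architectural event back to the generic
 * PERF_COUNT_HW_* id it would have served and zeroes its encoding.
 */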
static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
EVENT_ATTR_STR(mem-loads,   mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,  mem_st_hsw, "event=0xd0,umask=0x82");
/* Haswell special events */
EVENT_ATTR_STR(tx-start,    tx_start,    "event=0xc9,umask=0x1");
EVENT_ATTR_STR(tx-commit,   tx_commit,   "event=0xc9,umask=0x2");
EVENT_ATTR_STR(tx-abort,    tx_abort,    "event=0xc9,umask=0x4");
EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
EVENT_ATTR_STR(el-start,    el_start,    "event=0xc8,umask=0x1");
EVENT_ATTR_STR(el-commit,   el_commit,   "event=0xc8,umask=0x2");
EVENT_ATTR_STR(el-abort,    el_abort,    "event=0xc8,umask=0x4");
EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
EVENT_ATTR_STR(cycles-t,    cycles_t,    "event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct,   cycles_ct,   "event=0x3c,in_tx=1,in_tx_cp=1");
static struct attribute *hsw_events_attrs[] = {
	EVENT_PTR(tx_start),
	EVENT_PTR(tx_commit),
	EVENT_PTR(tx_abort),
	EVENT_PTR(tx_capacity),
	EVENT_PTR(tx_conflict),
	EVENT_PTR(el_start),
	EVENT_PTR(el_commit),
	EVENT_PTR(el_abort),
	EVENT_PTR(el_capacity),
	EVENT_PTR(el_conflict),
	EVENT_PTR(cycles_t),
	EVENT_PTR(cycles_ct),
	EVENT_PTR(mem_ld_hsw),
	EVENT_PTR(mem_st_hsw),
	NULL,
};
__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	struct extra_reg *er;
	int version, i;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	if (boot_cpu_has(X86_FEATURE_PDCM)) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65nm Core "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* 65nm Core2 "Merom" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* 65nm Core2 "Merom-L" */
	case 23: /* 45nm Core2 "Penryn" */
	case 29: /* 45nm Core2 "Dunnington" (MP) */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;
	case 30: /* 45nm Nehalem    */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;
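
	/*
	 * For reference, X86_CONFIG() above just packs the usual PERFEVTSEL
	 * fields into a raw config value using the bit layout advertised by
	 * the format attributes, roughly
	 * event | (umask << 8) | (inv << 23) | (cmask << 24), so the
	 * FRONTEND entry corresponds to a raw config of about 0x180010e.
	 */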
	case 28: /* 45nm Atom "Pineview"   */
	case 38: /* 45nm Atom "Lincroft"   */
	case 39: /* 32nm Atom "Penwell"    */
	case 53: /* 32nm Atom "Cloverview" */
	case 54: /* 32nm Atom "Cedarview"  */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;
	case 55: /* 22nm Atom "Silvermont"                */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		pr_cont("Silvermont events, ");
		break;
	case 37: /* 32nm Westmere    */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;
	case 42: /* 32nm SandyBridge         */
	case 45: /* 32nm SandyBridge-E/EN/EP */
		x86_add_quirk(intel_sandybridge_quirk);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 45)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;
	case 58: /* 22nm IvyBridge       */
	case 62: /* 22nm IvyBridge-EP/EX */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB is different from SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] =
			0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 62)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;
	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.lbr_double_abort = true;
		pr_cont("Haswell events, ");
		break;
	case 61: /* 14nm Broadwell Core-M */
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_bdw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		pr_cont("Broadwell events, ");
		break;
	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}
	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

	if (x86_pmu.event_constraints) {
		/*
		 * The event on fixed counter 2 (REF_CYCLES) only works on
		 * this counter, so do not extend its mask to the generic
		 * counters.
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != FIXED_EVENT_FLAGS
			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				continue;
			}

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;
		}
	}
	/*
	 * Accessing the LBR MSRs may cause a #GP under certain circumstances,
	 * e.g. KVM doesn't support the LBR MSRs.
	 * Check all LBR MSRs here.
	 * Disable LBR access if any LBR MSR can not be accessed.
	 */
	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}

	/*
	 * Accessing an extra MSR may cause a #GP under certain circumstances,
	 * e.g. KVM doesn't support offcore events.
	 * Check all extra_regs here.
	 */
	if (x86_pmu.extra_regs) {
		for (er = x86_pmu.extra_regs; er->msr; er++) {
			er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
			/* Disable LBR select mapping */
			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
				x86_pmu.lbr_sel_map = NULL;
		}
	}
	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
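		/*
		 * Background: the legacy counter MSRs at
		 * MSR_ARCH_PERFMON_PERFCTR0 only take 32-bit (sign-extended)
		 * writes, which is why max_period is normally capped at
		 * (1<<31)-1; when IA32_PERF_CAPABILITIES advertises
		 * full-width writes, the alternative MSR_IA32_PMC0 range
		 * accepts the full counter width, so max_period can grow to
		 * cntval_mask.
		 */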