 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>

#include "perf_event.h"
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
    [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
    [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
    [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
    [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
    [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
    [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
    [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
    [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
static struct event_constraint intel_core_event_constraints[] __read_mostly =
    INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
    INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
    INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
    INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
    INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
    INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
    INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
    INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
    INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
    INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
    INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
    INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
    INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
    INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
    INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
    INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
    INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
    INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
    INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
    INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
    INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
    INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
    INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
    INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
    /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
    INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
    INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
    INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
    INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
    INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
    INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
    INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
    INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
    INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
    INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
    INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
    INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
    INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
    INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
    INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
    INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
    INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
    INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
    INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
    INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
    INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
    INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
    INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
    INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
    INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
    /*
     * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
     * siblings; disable these events because they can corrupt unrelated
     */
    INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
    INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
    INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
    INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
    /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
    INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
    INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
    INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
static struct event_constraint intel_v1_event_constraints[] __read_mostly =

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
static struct event_constraint intel_slm_event_constraints[] __read_mostly =
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
    FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
    /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
    INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
    INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
    INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
    /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
    INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
    INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
    INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");

struct attribute *nhm_events_attrs[] = {
    EVENT_PTR(mem_ld_nhm),

struct attribute *snb_events_attrs[] = {
    EVENT_PTR(mem_ld_snb),
    EVENT_PTR(mem_st_snb),
static struct event_constraint intel_hsw_event_constraints[] = {
    FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
    FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
    FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
    INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
    INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
    INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
    /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
    INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
    /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
    INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
    /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
    INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
static u64 intel_pmu_event_map(int hw_event)
    return intel_perfmon_event_map[hw_event];
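/*
 * The SNB_* masks below encode MSR_OFFCORE_RSP_x bits for Sandy Bridge:
 * request type in the low bits, supplier/snoop response information above
 * (cf. the Nehalem/Westmere OFFCORE_RESPONSE comment further down and the
 * SDM).
 */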
#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
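/*
 * Worked example (illustrative): the LLC demand-read "access" entry below
 * programs SNB_DMND_READ|SNB_L3_ACCESS into the offcore response MSR, i.e.
 * SNB_DMND_DATA_RD | SNB_LLC_DATA_RD | SNB_RESP_ANY
 *   = (1ULL << 0)  | (1ULL << 7)     | (1ULL << 16) = 0x10081.
 */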
static __initconst const u64 snb_hw_cache_extra_regs
    [PERF_COUNT_HW_CACHE_MAX]
    [PERF_COUNT_HW_CACHE_OP_MAX]
    [PERF_COUNT_HW_CACHE_RESULT_MAX] =
        [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
        [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
        [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
        [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
        [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
        [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
        [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
        [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
        [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
        [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
static __initconst const u64 snb_hw_cache_event_ids
    [PERF_COUNT_HW_CACHE_MAX]
    [PERF_COUNT_HW_CACHE_OP_MAX]
    [PERF_COUNT_HW_CACHE_RESULT_MAX] =
        [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
        [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
        [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
        [ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS)   ] = 0x0,
        /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
        /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
    [ C(OP_PREFETCH) ] = {
        /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
        [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
        [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
        [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
        [ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS)   ] = 0x0,
        [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
        [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
        [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
        [ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
        [ C(RESULT_ACCESS) ] = 0x01b7,
        [ C(RESULT_MISS)   ] = 0x01b7,
        [ C(RESULT_ACCESS) ] = 0x01b7,
        [ C(RESULT_MISS)   ] = 0x01b7,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x01b7,
        [ C(RESULT_MISS)   ] = 0x01b7,
static __initconst const u64 westmere_hw_cache_event_ids
    [PERF_COUNT_HW_CACHE_MAX]
    [PERF_COUNT_HW_CACHE_OP_MAX]
    [PERF_COUNT_HW_CACHE_RESULT_MAX] =
        [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
        [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
        [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
        [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
        [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
        [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
        [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS)   ] = 0x0,
        /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
    /*
     * Use RFO, not WRITEBACK, because a write miss would typically occur
     * on RFO.
     */
        /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
    [ C(OP_PREFETCH) ] = {
        /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
        [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
        [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
        [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
        [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS)   ] = 0x0,
        [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
        [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
        [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
        [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
        [ C(RESULT_ACCESS) ] = 0x01b7,
        [ C(RESULT_MISS)   ] = 0x01b7,
        [ C(RESULT_ACCESS) ] = 0x01b7,
        [ C(RESULT_MISS)   ] = 0x01b7,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x01b7,
        [ C(RESULT_MISS)   ] = 0x01b7,
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */
#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
static __initconst const u64 nehalem_hw_cache_extra_regs
    [PERF_COUNT_HW_CACHE_MAX]
    [PERF_COUNT_HW_CACHE_OP_MAX]
    [PERF_COUNT_HW_CACHE_RESULT_MAX] =
        [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
        [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
        [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
        [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
        [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
        [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
        [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
        [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
        [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
        [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
static __initconst const u64 nehalem_hw_cache_event_ids
    [PERF_COUNT_HW_CACHE_MAX]
    [PERF_COUNT_HW_CACHE_OP_MAX]
    [PERF_COUNT_HW_CACHE_RESULT_MAX] =
        [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
        [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
        [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
        [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
        [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
        [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
        [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS)   ] = 0x0,
        /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
    /*
     * Use RFO, not WRITEBACK, because a write miss would typically occur
     * on RFO.
     */
        /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
    [ C(OP_PREFETCH) ] = {
        /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
        [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
        [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
        [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
        [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS)   ] = 0x0,
        [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
        [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
        [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
        [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
        [ C(RESULT_ACCESS) ] = 0x01b7,
        [ C(RESULT_MISS)   ] = 0x01b7,
        [ C(RESULT_ACCESS) ] = 0x01b7,
        [ C(RESULT_MISS)   ] = 0x01b7,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x01b7,
        [ C(RESULT_MISS)   ] = 0x01b7,
static __initconst const u64 core2_hw_cache_event_ids
    [PERF_COUNT_HW_CACHE_MAX]
    [PERF_COUNT_HW_CACHE_OP_MAX]
    [PERF_COUNT_HW_CACHE_RESULT_MAX] =
        [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
        [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
        [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
        [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
        [ C(RESULT_MISS)   ] = 0,
        [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
        [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0,
        [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
        [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
        [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
        [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0,
        [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
        [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
        [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
        [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0,
        [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
        [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
        [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
        [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
static __initconst const u64 atom_hw_cache_event_ids
    [PERF_COUNT_HW_CACHE_MAX]
    [PERF_COUNT_HW_CACHE_OP_MAX]
    [PERF_COUNT_HW_CACHE_RESULT_MAX] =
        [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
        [ C(RESULT_MISS)   ] = 0,
        [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
        [ C(RESULT_MISS)   ] = 0,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS)   ] = 0,
        [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
        [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0,
        [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
        [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
        [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
        [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0,
        [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
        [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
        [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
        [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0,
        [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
        [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
        [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
        [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
static struct extra_reg intel_slm_extra_regs[] __read_mostly =
    /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
    INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
    INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),

#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)
static __initconst const u64 slm_hw_cache_extra_regs
    [PERF_COUNT_HW_CACHE_MAX]
    [PERF_COUNT_HW_CACHE_OP_MAX]
    [PERF_COUNT_HW_CACHE_RESULT_MAX] =
        [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
        [ C(RESULT_MISS)   ] = SLM_DMND_READ|SLM_LLC_MISS,
        [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
        [ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
        [ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
static __initconst const u64 slm_hw_cache_event_ids
    [PERF_COUNT_HW_CACHE_MAX]
    [PERF_COUNT_HW_CACHE_OP_MAX]
    [PERF_COUNT_HW_CACHE_RESULT_MAX] =
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0,
        [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
        [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0,
        /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
        /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
    [ C(OP_PREFETCH) ] = {
        /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
        [ C(RESULT_ACCESS) ] = 0x01b7,
        /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
        [ C(RESULT_MISS)   ] = 0x01b7,
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS)   ] = 0,
        [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
        [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
        [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
        [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS)   ] = -1,
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
    /* user explicitly requested branch sampling */
    if (has_branch_stack(event))

    /* implicit branch sampling to correct PEBS skid */
    if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
        x86_pmu.intel_cap.pebs_format < 2)
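/*
 * Global enable/disable: MSR_CORE_PERF_GLOBAL_CTRL gates all counters at
 * once; BTS, PEBS and the LBRs have their own enable bits and are handled
 * explicitly below.
 */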
static void intel_pmu_disable_all(void)
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

    if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
        intel_pmu_disable_bts();

    intel_pmu_pebs_disable_all();
    intel_pmu_lbr_disable_all();
static void intel_pmu_enable_all(int added)
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

    intel_pmu_pebs_enable_all();
    intel_pmu_lbr_enable_all();
    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
           x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

    if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
        struct perf_event *event =
            cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

        if (WARN_ON_ONCE(!event))

        intel_pmu_enable_bts(event->hw.config);
/*
 * Intel Errata AAK100 (model 26)
 * Intel Errata AAP53 (model 30)
 * Intel Errata BD53 (model 44)
 *
 * The official story:
 * These chips need to be 'reset' when adding counters by programming the
 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 * in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
    static const unsigned long nhm_magic[4] = {
    struct perf_event *event;

    /*
     * The Errata requires below steps:
     * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
     * 2) Configure 4 PERFEVTSELx with the magic events and clear
     *    the corresponding PMCx;
     * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
     * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
     * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
     *
     * The real steps we choose are a little different from above.
     * A) To reduce MSR operations, we don't run step 1) as they
     *    are already cleared before this function is called;
     * B) Call x86_perf_event_update to save PMCx before configuring
     *    PERFEVTSELx with magic number;
     * C) With step 5), we do clear only when the PERFEVTSELx is
     *    not used currently.
     * D) Call x86_perf_event_set_period to restore PMCx;
     */

    /* We always operate 4 pairs of PERF Counters */
    for (i = 0; i < 4; i++) {
        event = cpuc->events[i];

        x86_perf_event_update(event);

    for (i = 0; i < 4; i++) {
        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
        wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);

    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

    for (i = 0; i < 4; i++) {
        event = cpuc->events[i];

        x86_perf_event_set_period(event);
        __x86_pmu_enable_event(&event->hw,
                               ARCH_PERFMON_EVENTSEL_ENABLE);

        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);

static void intel_pmu_nhm_enable_all(int added)
    intel_pmu_nhm_workaround();
    intel_pmu_enable_all(added);
static inline u64 intel_pmu_get_status(void)
    rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

static inline void intel_pmu_ack_status(u64 ack)
    wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
    int idx = hwc->idx - INTEL_PMC_IDX_FIXED;

    mask = 0xfULL << (idx * 4);

    rdmsrl(hwc->config_base, ctrl_val);
    wrmsrl(hwc->config_base, ctrl_val);
static inline bool event_is_checkpointed(struct perf_event *event)
    return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
static void intel_pmu_disable_event(struct perf_event *event)
    struct hw_perf_event *hwc = &event->hw;
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

    if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
        intel_pmu_disable_bts();
        intel_pmu_drain_bts_buffer();

    cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
    cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
    cpuc->intel_cp_status &= ~(1ull << hwc->idx);

    /*
     * must be disabled before any actual event
     * because any event may be combined with LBR
     */
    if (intel_pmu_needs_lbr_smpl(event))
        intel_pmu_lbr_disable(event);

    if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
        intel_pmu_disable_fixed(hwc);

    x86_pmu_disable_event(event);

    if (unlikely(event->attr.precise_ip))
        intel_pmu_pebs_disable(event);
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
    int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
    u64 ctrl_val, bits, mask;

    /*
     * Enable IRQ generation (0x8),
     * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
     */
    if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
    if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)

    /*
     * ANY bit is supported in v3 and up
     */
    if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)

    mask = 0xfULL << (idx * 4);

    rdmsrl(hwc->config_base, ctrl_val);
    wrmsrl(hwc->config_base, ctrl_val);
static void intel_pmu_enable_event(struct perf_event *event)
    struct hw_perf_event *hwc = &event->hw;
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

    if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
        if (!__this_cpu_read(cpu_hw_events.enabled))

        intel_pmu_enable_bts(hwc->config);

    /*
     * must be enabled before any actual event
     * because any event may be combined with LBR
     */
    if (intel_pmu_needs_lbr_smpl(event))
        intel_pmu_lbr_enable(event);

    if (event->attr.exclude_host)
        cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
    if (event->attr.exclude_guest)
        cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

    if (unlikely(event_is_checkpointed(event)))
        cpuc->intel_cp_status |= (1ull << hwc->idx);

    if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
        intel_pmu_enable_fixed(hwc);

    if (unlikely(event->attr.precise_ip))
        intel_pmu_pebs_enable(event);

    __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
    x86_perf_event_update(event);
    /*
     * For a checkpointed counter always reset back to 0. This
     * avoids a situation where the counter overflows, aborts the
     * transaction and is then set back to shortly before the
     * overflow, and overflows and aborts again.
     */
    if (unlikely(event_is_checkpointed(event))) {
        /* No race with NMIs because the counter should not be armed */
        wrmsrl(event->hw.event_base, 0);
        local64_set(&event->hw.prev_count, 0);

    return x86_perf_event_set_period(event);
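/*
 * Last-resort cleanup (e.g. when the PMI handler appears stuck): clear every
 * generic and fixed counter and rewind the BTS buffer index.
 */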
static void intel_pmu_reset(void)
    struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
    unsigned long flags;

    if (!x86_pmu.num_counters)

    local_irq_save(flags);

    pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
        wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
        wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);

    for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
        wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

    ds->bts_index = ds->bts_buffer_base;

    local_irq_restore(flags);
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
    struct perf_sample_data data;
    struct cpu_hw_events *cpuc;

    cpuc = &__get_cpu_var(cpu_hw_events);

    /*
     * No known reason to not always do late ACK,
     * but just in case do it opt-in.
     */
    if (!x86_pmu.late_ack)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
    intel_pmu_disable_all();
    handled = intel_pmu_drain_bts_buffer();
    status = intel_pmu_get_status();
        intel_pmu_enable_all(0);

    intel_pmu_ack_status(status);
    if (++loops > 100) {
        static bool warned = false;

        WARN(1, "perfevents: irq loop stuck!\n");
        perf_event_print_debug();

    inc_irq_stat(apic_perf_irqs);

    intel_pmu_lbr_read();

    /*
     * PEBS overflow sets bit 62 in the global status register
     */
    if (__test_and_clear_bit(62, (unsigned long *)&status)) {
        x86_pmu.drain_pebs(regs);

    /*
     * Checkpointed counters can lead to 'spurious' PMIs because the
     * rollback caused by the PMI will have cleared the overflow status
     * bit. Therefore always force probe these counters.
     */
    status |= cpuc->intel_cp_status;

    for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
        struct perf_event *event = cpuc->events[bit];

        if (!test_bit(bit, cpuc->active_mask))

        if (!intel_pmu_save_and_restart(event))

        perf_sample_data_init(&data, 0, event->hw.last_period);

        if (has_branch_stack(event))
            data.br_stack = &cpuc->lbr_stack;

        if (perf_event_overflow(event, &data, regs))
            x86_pmu_stop(event, 0);

    /*
     * Repeat if there is more work to be done:
     */
    status = intel_pmu_get_status();

    intel_pmu_enable_all(0);
    /*
     * Only unmask the NMI after the overflow counters
     * have been reset. This avoids spurious NMIs on
     */
    if (x86_pmu.late_ack)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
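/*
 * BTS is modelled as the branch-instructions event sampled with period 1;
 * such events are steered onto the BTS machinery via bts_constraint.
 */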
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
    struct hw_perf_event *hwc = &event->hw;
    unsigned int hw_event, bts_event;

    if (event->attr.freq)

    hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
    bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

    if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
        return &bts_constraint;
static int intel_alt_er(int idx)
    if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))

    if (idx == EXTRA_REG_RSP_0)
        return EXTRA_REG_RSP_1;

    if (idx == EXTRA_REG_RSP_1)
        return EXTRA_REG_RSP_0;
static void intel_fixup_er(struct perf_event *event, int idx)
    event->hw.extra_reg.idx = idx;

    if (idx == EXTRA_REG_RSP_0) {
        event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
        event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
        event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
    } else if (idx == EXTRA_REG_RSP_1) {
        event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
        event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
        event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
/*
 * manage allocation of shared extra msr for certain events
 *
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
                                   struct perf_event *event,
                                   struct hw_perf_event_extra *reg)
    struct event_constraint *c = &emptyconstraint;
    struct er_account *era;
    unsigned long flags;

    /*
     * reg->alloc can be set due to existing state, so for fake cpuc we
     * need to ignore this, otherwise we might fail to allocate proper fake
     * state for this extra reg constraint. Also see the comment below.
     */
    if (reg->alloc && !cpuc->is_fake)
        return NULL; /* call x86_get_event_constraint() */

    era = &cpuc->shared_regs->regs[idx];

    /*
     * we use spin_lock_irqsave() to avoid lockdep issues when
     * passing a fake cpuc
     */
    raw_spin_lock_irqsave(&era->lock, flags);

    if (!atomic_read(&era->ref) || era->config == reg->config) {

        /*
         * If it's a fake cpuc -- as per validate_{group,event}() we
         * shouldn't touch event state and we can avoid doing so
         * since both will only call get_event_constraints() once
         * on each event, this avoids the need for reg->alloc.
         *
         * Not doing the ER fixup will only result in era->reg being
         * wrong, but since we won't actually try and program hardware
         * this isn't a problem either.
         */
        if (!cpuc->is_fake) {
            if (idx != reg->idx)
                intel_fixup_er(event, idx);

        /*
         * x86_schedule_events() can call get_event_constraints()
         * multiple times on events in the case of incremental
         * scheduling(). reg->alloc ensures we only do the ER
         */

        /* lock in msr value */
        era->config = reg->config;
        era->reg = reg->reg;

        atomic_inc(&era->ref);

    /*
     * need to call x86_get_event_constraint()
     * to check if associated event has constraints
     */

    idx = intel_alt_er(idx);
    if (idx != reg->idx) {
        raw_spin_unlock_irqrestore(&era->lock, flags);

    raw_spin_unlock_irqrestore(&era->lock, flags);
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
                                   struct hw_perf_event_extra *reg)
    struct er_account *era;

    /*
     * Only put constraint if extra reg was actually allocated. Also takes
     * care of events which do not use an extra shared reg.
     *
     * Also, if this is a fake cpuc we shouldn't touch any event state
     * (reg->alloc) and we don't care about leaving inconsistent cpuc state
     * either since it'll be thrown out.
     */
    if (!reg->alloc || cpuc->is_fake)

    era = &cpuc->shared_regs->regs[reg->idx];

    /* one fewer user */
    atomic_dec(&era->ref);

    /* allocate again next time */
static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
                              struct perf_event *event)
    struct event_constraint *c = NULL, *d;
    struct hw_perf_event_extra *xreg, *breg;

    xreg = &event->hw.extra_reg;
    if (xreg->idx != EXTRA_REG_NONE) {
        c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
        if (c == &emptyconstraint)

    breg = &event->hw.branch_reg;
    if (breg->idx != EXTRA_REG_NONE) {
        d = __intel_shared_reg_get_constraints(cpuc, event, breg);
        if (d == &emptyconstraint) {
            __intel_shared_reg_put_constraints(cpuc, xreg);
struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
    struct event_constraint *c;

    if (x86_pmu.event_constraints) {
        for_each_event_constraint(c, x86_pmu.event_constraints) {
            if ((event->hw.config & c->cmask) == c->code) {
                event->hw.flags |= c->flags;

    return &unconstrained;
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
    struct event_constraint *c;

    c = intel_bts_constraints(event);
    c = intel_pebs_constraints(event);
    c = intel_shared_regs_constraints(cpuc, event);

    return x86_get_event_constraints(cpuc, event);
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
    struct hw_perf_event_extra *reg;

    reg = &event->hw.extra_reg;
    if (reg->idx != EXTRA_REG_NONE)
        __intel_shared_reg_put_constraints(cpuc, reg);

    reg = &event->hw.branch_reg;
    if (reg->idx != EXTRA_REG_NONE)
        __intel_shared_reg_put_constraints(cpuc, reg);

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
    intel_put_shared_regs_event_constraints(cpuc, event);
static void intel_pebs_aliases_core2(struct perf_event *event)
    if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
        /*
         * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
         * (0x003c) so that we can use it with PEBS.
         *
         * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
         * PEBS capable. However we can use INST_RETIRED.ANY_P
         * (0x00c0), which is a PEBS capable event, to get the same
         *
         * INST_RETIRED.ANY_P counts the number of cycles that retires
         * CNTMASK instructions. By setting CNTMASK to a value (16)
         * larger than the maximum number of instructions that can be
         * retired per cycle (4) and then inverting the condition, we
         * count all cycles that retire 16 or less instructions, which
         *
         * Thereby we gain a PEBS capable cycle counter.
         */
        u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

        alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
        event->hw.config = alt_config;
static void intel_pebs_aliases_snb(struct perf_event *event)
    if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
        /*
         * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
         * (0x003c) so that we can use it with PEBS.
         *
         * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
         * PEBS capable. However we can use UOPS_RETIRED.ALL
         * (0x01c2), which is a PEBS capable event, to get the same
         *
         * UOPS_RETIRED.ALL counts the number of cycles that retires
         * CNTMASK micro-ops. By setting CNTMASK to a value (16)
         * larger than the maximum number of micro-ops that can be
         * retired per cycle (4) and then inverting the condition, we
         * count all cycles that retire 16 or less micro-ops, which
         *
         * Thereby we gain a PEBS capable cycle counter.
         */
        u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

        alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
        event->hw.config = alt_config;
static int intel_pmu_hw_config(struct perf_event *event)
    int ret = x86_pmu_hw_config(event);

    if (event->attr.precise_ip && x86_pmu.pebs_aliases)
        x86_pmu.pebs_aliases(event);

    if (intel_pmu_needs_lbr_smpl(event)) {
        ret = intel_pmu_setup_lbr_filter(event);

    if (event->attr.type != PERF_TYPE_RAW)

    if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))

    if (x86_pmu.version < 3)

    if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))

    event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
    if (x86_pmu.guest_get_msrs)
        return x86_pmu.guest_get_msrs(nr);
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);

static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
    struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

    arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
    arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
    arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
    /*
     * If PMU counter has PEBS enabled it is not enough to disable counter
     * on a guest entry since PEBS memory write can overshoot guest entry
     * and corrupt guest memory. Disabling PEBS solves the problem.
     */
    arr[1].msr = MSR_IA32_PEBS_ENABLE;
    arr[1].host = cpuc->pebs_enabled;
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
    struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
        struct perf_event *event = cpuc->events[idx];

        arr[idx].msr = x86_pmu_config_addr(idx);
        arr[idx].host = arr[idx].guest = 0;

        if (!test_bit(idx, cpuc->active_mask))

        arr[idx].host = arr[idx].guest =
            event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

        if (event->attr.exclude_host)
            arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
        else if (event->attr.exclude_guest)
            arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;

    *nr = x86_pmu.num_counters;
static void core_pmu_enable_event(struct perf_event *event)
    if (!event->attr.exclude_host)
        x86_pmu_enable_event(event);

static void core_pmu_enable_all(int added)
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
        struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

        if (!test_bit(idx, cpuc->active_mask) ||
            cpuc->events[idx]->attr.exclude_host)

        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
static int hsw_hw_config(struct perf_event *event)
    int ret = intel_pmu_hw_config(event);

    if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))

    event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);

    /*
     * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
     * PEBS or in ANY thread mode. Since the results are non-sensical forbid
     */
    if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
        ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
         event->attr.precise_ip > 0))

    if (event_is_checkpointed(event)) {
        /*
         * Sampling of checkpointed events can cause situations where
         * the CPU constantly aborts because of an overflow, which is
         * then checkpointed back and ignored. Forbid checkpointing
         *
         * But still allow a long sampling period, so that perf stat
         */
        if (event->attr.sample_period > 0 &&
            event->attr.sample_period < 0x7fffffff)
static struct event_constraint counter2_constraint =
            EVENT_CONSTRAINT(0, 0x4, 0);
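/*
 * The 0x4 counter bitmask restricts this constraint to generic counter 2,
 * matching the in_tx_checkpointed quirk handled below.
 */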
static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
    struct event_constraint *c = intel_get_event_constraints(cpuc, event);

    /* Handle special quirk on in_tx_checkpointed only in counter 2 */
    if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
        if (c->idxmsk64 & (1U << 2))
            return &counter2_constraint;
        return &emptyconstraint;
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(umask, "config:8-15");
PMU_FORMAT_ATTR(edge, "config:18");
PMU_FORMAT_ATTR(pc, "config:19");
PMU_FORMAT_ATTR(any, "config:21"); /* v3 + */
PMU_FORMAT_ATTR(inv, "config:23");
PMU_FORMAT_ATTR(cmask, "config:24-31");
PMU_FORMAT_ATTR(in_tx, "config:32");
PMU_FORMAT_ATTR(in_tx_cp, "config:33");
static struct attribute *intel_arch_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_pc.attr,
    &format_attr_inv.attr,
    &format_attr_cmask.attr,

ssize_t intel_event_sysfs_show(char *page, u64 config)
    u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

    return x86_event_sysfs_show(page, config, event);
static __initconst const struct x86_pmu core_pmu = {
    .handle_irq             = x86_pmu_handle_irq,
    .disable_all            = x86_pmu_disable_all,
    .enable_all             = core_pmu_enable_all,
    .enable                 = core_pmu_enable_event,
    .disable                = x86_pmu_disable_event,
    .hw_config              = x86_pmu_hw_config,
    .schedule_events        = x86_schedule_events,
    .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
    .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
    .event_map              = intel_pmu_event_map,
    .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
    /*
     * Intel PMCs cannot be accessed sanely above 32 bit width,
     * so we install an artificial 1<<31 period regardless of
     * the generic event period:
     */
    .max_period             = (1ULL << 31) - 1,
    .get_event_constraints  = intel_get_event_constraints,
    .put_event_constraints  = intel_put_event_constraints,
    .event_constraints      = intel_core_event_constraints,
    .guest_get_msrs         = core_guest_get_msrs,
    .format_attrs           = intel_arch_formats_attr,
    .events_sysfs_show      = intel_event_sysfs_show,
struct intel_shared_regs *allocate_shared_regs(int cpu)
    struct intel_shared_regs *regs;

    regs = kzalloc_node(sizeof(struct intel_shared_regs),
                        GFP_KERNEL, cpu_to_node(cpu));
    /*
     * initialize the locks to keep lockdep happy
     */
    for (i = 0; i < EXTRA_REG_MAX; i++)
        raw_spin_lock_init(&regs->regs[i].lock);

static int intel_pmu_cpu_prepare(int cpu)
    struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

    if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))

    cpuc->shared_regs = allocate_shared_regs(cpu);
    if (!cpuc->shared_regs)
static void intel_pmu_cpu_starting(int cpu)
    struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
    int core_id = topology_core_id(cpu);

    init_debug_store_on_cpu(cpu);
    /*
     * Deal with CPUs that don't clear their LBRs on power-up.
     */
    intel_pmu_lbr_reset();

    cpuc->lbr_sel = NULL;

    if (!cpuc->shared_regs)

    if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
        for_each_cpu(i, topology_thread_cpumask(cpu)) {
            struct intel_shared_regs *pc;

            pc = per_cpu(cpu_hw_events, i).shared_regs;
            if (pc && pc->core_id == core_id) {
                cpuc->kfree_on_online = cpuc->shared_regs;
                cpuc->shared_regs = pc;

    cpuc->shared_regs->core_id = core_id;
    cpuc->shared_regs->refcnt++;

    if (x86_pmu.lbr_sel_map)
        cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
static void intel_pmu_cpu_dying(int cpu)
    struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
    struct intel_shared_regs *pc;

    pc = cpuc->shared_regs;
    if (pc->core_id == -1 || --pc->refcnt == 0)
    cpuc->shared_regs = NULL;

    fini_debug_store_on_cpu(cpu);
static void intel_pmu_flush_branch_stack(void)
    /*
     * Intel LBR does not tag entries with the
     * PID of the current task, then we need to
     * flush it on ctxsw
     * For now, we simply reset it
     */
    intel_pmu_lbr_reset();
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

PMU_FORMAT_ATTR(ldlat, "config1:0-15");

static struct attribute *intel_arch3_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_pc.attr,
    &format_attr_any.attr,
    &format_attr_inv.attr,
    &format_attr_cmask.attr,
    &format_attr_in_tx.attr,
    &format_attr_in_tx_cp.attr,

    &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
    &format_attr_ldlat.attr, /* PEBS load latency */
static __initconst const struct x86_pmu intel_pmu = {
    .handle_irq             = intel_pmu_handle_irq,
    .disable_all            = intel_pmu_disable_all,
    .enable_all             = intel_pmu_enable_all,
    .enable                 = intel_pmu_enable_event,
    .disable                = intel_pmu_disable_event,
    .hw_config              = intel_pmu_hw_config,
    .schedule_events        = x86_schedule_events,
    .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
    .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
    .event_map              = intel_pmu_event_map,
    .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
    /*
     * Intel PMCs cannot be accessed sanely above 32 bit width,
     * so we install an artificial 1<<31 period regardless of
     * the generic event period:
     */
    .max_period             = (1ULL << 31) - 1,
    .get_event_constraints  = intel_get_event_constraints,
    .put_event_constraints  = intel_put_event_constraints,
    .pebs_aliases           = intel_pebs_aliases_core2,

    .format_attrs           = intel_arch3_formats_attr,
    .events_sysfs_show      = intel_event_sysfs_show,

    .cpu_prepare            = intel_pmu_cpu_prepare,
    .cpu_starting           = intel_pmu_cpu_starting,
    .cpu_dying              = intel_pmu_cpu_dying,
    .guest_get_msrs         = intel_guest_get_msrs,
    .flush_branch_stack     = intel_pmu_flush_branch_stack,
static __init void intel_clovertown_quirk(void)
    /*
     * PEBS is unreliable due to:
     *
     *  AJ67  - PEBS may experience CPL leaks
     *  AJ68  - PEBS PMI may be delayed by one event
     *  AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
     *  AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
     *
     * AJ67  could be worked around by restricting the OS/USR flags.
     * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
     * AJ106 could possibly be worked around by not allowing LBR
     *       usage from PEBS, including the fixup.
     * AJ68  could possibly be worked around by always programming
     *       a pebs_event_reset[0] value and coping with the lost events.
     *
     * But taken together it might just make sense to not enable PEBS on
     */
    pr_warn("PEBS disabled due to CPU errata\n");
    x86_pmu.pebs_constraints = NULL;

static int intel_snb_pebs_broken(int cpu)
{
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
	case 42: /* SNB */
		rev = 0x28;
		break;

	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}
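
/*
 * The check above is a plain revision compare: for example, an SNB-EP
 * part at stepping 7 running microcode 0x70a would report PEBS as broken
 * (0x70a < 0x70c), while 0x70c or newer would not.  Unknown models keep
 * rev == UINT_MAX and are therefore always treated as broken.
 */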

static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}
	put_online_cpus();

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock.
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}

static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}
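
/*
 * Registering intel_snb_check_microcode() as x86_pmu.check_microcode
 * means the test is presumably re-run whenever new microcode is loaded
 * (the core x86 perf code exposes this hook to the microcode-update
 * path), so PEBS can be re-enabled at runtime after an update.
 */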

static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES,		"cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS,		"instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES,		"bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES,	"cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES,		"cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS,	"branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES,		"branch misses" },
};

static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events reported as not present by cpuid */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: '%s' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}
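
/*
 * x86_pmu.events_mask mirrors CPUID.0xA:EBX, where a set bit means the
 * corresponding architectural event is *not* available.  So if, say,
 * bit 6 were set, the quirk above would zero
 * intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] and print the
 * warning for "branch misses".
 */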

static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}
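
/*
 * 0x7f89 decodes as event select 0x89 with umask 0x7f, i.e.
 * BR_MISP_EXEC.ANY, which is the event the comment above refers to.
 */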

EVENT_ATTR_STR(mem-loads,	mem_ld_hsw,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_hsw,	"event=0xd0,umask=0x82");

/* Haswell special events */
EVENT_ATTR_STR(tx-start,	tx_start,	"event=0xc9,umask=0x1");
EVENT_ATTR_STR(tx-commit,	tx_commit,	"event=0xc9,umask=0x2");
EVENT_ATTR_STR(tx-abort,	tx_abort,	"event=0xc9,umask=0x4");
EVENT_ATTR_STR(tx-capacity,	tx_capacity,	"event=0x54,umask=0x2");
EVENT_ATTR_STR(tx-conflict,	tx_conflict,	"event=0x54,umask=0x1");
EVENT_ATTR_STR(el-start,	el_start,	"event=0xc8,umask=0x1");
EVENT_ATTR_STR(el-commit,	el_commit,	"event=0xc8,umask=0x2");
EVENT_ATTR_STR(el-abort,	el_abort,	"event=0xc8,umask=0x4");
EVENT_ATTR_STR(el-capacity,	el_capacity,	"event=0x54,umask=0x2");
EVENT_ATTR_STR(el-conflict,	el_conflict,	"event=0x54,umask=0x1");
EVENT_ATTR_STR(cycles-t,	cycles_t,	"event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct,	cycles_ct,	"event=0x3c,in_tx=1,in_tx_cp=1");

static struct attribute *hsw_events_attrs[] = {
	EVENT_PTR(tx_start),
	EVENT_PTR(tx_commit),
	EVENT_PTR(tx_abort),
	EVENT_PTR(tx_capacity),
	EVENT_PTR(tx_conflict),
	EVENT_PTR(el_start),
	EVENT_PTR(el_commit),
	EVENT_PTR(el_abort),
	EVENT_PTR(el_capacity),
	EVENT_PTR(el_conflict),
	EVENT_PTR(cycles_t),
	EVENT_PTR(cycles_ct),
	EVENT_PTR(mem_ld_hsw),
	EVENT_PTR(mem_st_hsw),
	NULL
};
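
/*
 * Each entry above becomes a named alias under the cpu PMU's sysfs
 * "events" directory, so on Haswell the TSX events can be used
 * symbolically, for example:
 *
 *	perf stat -e cpu/tx-start/,cpu/tx-commit/,cpu/tx-abort/ ./workload
 */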

__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
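
	/*
	 * Worked example (hypothetical values): a CPU reporting
	 * CPUID.0xA:EAX = 0x07300404 would give version_id 4,
	 * num_counters 4 and bit_width 48, so cntval_mask becomes
	 * (1ULL << 48) - 1 = 0xffffffffffff and max_pebs_events is
	 * min(MAX_PEBS_EVENTS, 4).
	 */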

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;
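
	/*
	 * The STALLED_CYCLES encodings used above (and in the Westmere and
	 * SandyBridge branches below) rely on counter-mask inversion:
	 * cmask=1 with inv=1 counts cycles in which the event fired fewer
	 * than one time, i.e. cycles where no uops were issued/executed,
	 * which is what perf reports as stall cycles.
	 */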

	case 28: /* Atom */
	case 38: /* Lincroft */
	case 39: /* Penwell */
	case 53: /* Cloverview */
	case 54: /* Cedarview */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 55: /* Atom 22nm "Silvermont" */
	case 77: /* Avoton "Silvermont" */
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		pr_cont("Silvermont events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
	case 45: /* SandyBridge, "Romley-EP" */
		x86_add_quirk(intel_sandybridge_quirk);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 45)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;

	case 58: /* IvyBridge */
	case 62: /* IvyBridge EP */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 62)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;

	case 60: /* Haswell Client */
	case 70:
	case 71:
	case 63:
	case 69:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.lbr_double_abort = true;
		pr_cont("Haswell events, ");
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}

	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
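
	/*
	 * Example (hypothetical counts): with 4 generic and 3 fixed
	 * counters, intel_ctrl ends up as 0xf | (0x7 << INTEL_PMC_IDX_FIXED)
	 * = 0x70000000f, i.e. a global-enable mask covering every counter
	 * the PMU actually has (INTEL_PMC_IDX_FIXED is 32).
	 */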

	if (x86_pmu.event_constraints) {
		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != FIXED_EVENT_FLAGS
			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				continue;
			}

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;
		}
	}

	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");