/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt)	"power8-pmu: " fmt

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <asm/firmware.h>
#include <asm/cputable.h>
/*
 * Some power8 event codes.
 */
#define EVENT(_name, _code)	_name = _code,

enum {
#include "power8-events-list.h"
};

#undef EVENT
/*
 * Raw event encoding for POWER8:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | | [ ]                           [      thresh_cmp     ]   [  thresh_ctl   ]
 *   | |  |                                                              |
 *   | |  *- IFM (Linux)                 thresh start/stop OR FAB match -*
 *   | *- BHRB (Linux)
 *   *- EBB (Linux)
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   c     m   [    pmcxsel    ]
 *     |        |           |                          |     |
 *     |        |           |                          |     *- mark
 *     |        |           |                          *- combine
 *     |        |           |
 *     |        |           *- L1/L2/L3 cache_sel
 *     |        |
 *     |        *- sampling mode for marked events (Linux)
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit    (PMCxUNIT)
 * MMCR1[x]   = combine (PMCxCOMB)
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	# PM_MRK_FAB_RSP_MATCH
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	# PM_MRK_FAB_RSP_MATCH_CYC
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else
 *	MMCRA[48:55] = thresh_ctl   (THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[22:24] = thresh_cmp[0:2]
 *	MMCRA[25:31] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0: # L3 bank
 *		MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 * if EBB and BHRB:
 *	MMCRA[32:33] = IFM
 *
 */
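/*
 * Worked example of the encoding above (illustrative only; the event code
 * comes from power8-events-list.h): PM_BR_2PATH = 0x20036 decodes as
 * pmc = (0x20036 >> 16) & 0xf = 2, unit = 0, combine = 0, mark = 0 and
 * pmcxsel = 0x36, i.e. "program PMC2SEL to 0x36 and count on PMC2", with
 * no threshold, sampling or cache-select configuration.
 */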
#define EVENT_EBB_MASK		1ull
#define EVENT_EBB_SHIFT		PERF_EVENT_CONFIG_EBB_SHIFT
#define EVENT_BHRB_MASK		1ull
#define EVENT_BHRB_SHIFT	62
#define EVENT_WANTS_BHRB	(EVENT_BHRB_MASK << EVENT_BHRB_SHIFT)
#define EVENT_IFM_MASK		3ull
#define EVENT_IFM_SHIFT		60
#define EVENT_THR_CMP_SHIFT	40	/* Threshold CMP value */
#define EVENT_THR_CMP_MASK	0x3ff
#define EVENT_THR_CTL_SHIFT	32	/* Threshold control value (start/stop) */
#define EVENT_THR_CTL_MASK	0xffull
#define EVENT_THR_SEL_SHIFT	29	/* Threshold select value */
#define EVENT_THR_SEL_MASK	0x7
#define EVENT_THRESH_SHIFT	29	/* All threshold bits */
#define EVENT_THRESH_MASK	0x1fffffull
#define EVENT_SAMPLE_SHIFT	24	/* Sampling mode & eligibility */
#define EVENT_SAMPLE_MASK	0x1f
#define EVENT_CACHE_SEL_SHIFT	20	/* L2/L3 cache select */
#define EVENT_CACHE_SEL_MASK	0xf
#define EVENT_IS_L1		(4 << EVENT_CACHE_SEL_SHIFT)
#define EVENT_PMC_SHIFT		16	/* PMC number (1-based) */
#define EVENT_PMC_MASK		0xf
#define EVENT_UNIT_SHIFT	12	/* Unit */
#define EVENT_UNIT_MASK		0xf
#define EVENT_COMBINE_SHIFT	11	/* Combine bit */
#define EVENT_COMBINE_MASK	0x1
#define EVENT_MARKED_SHIFT	8	/* Marked bit */
#define EVENT_MARKED_MASK	0x1
#define EVENT_IS_MARKED		(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
#define EVENT_PSEL_MASK		0xff	/* PMCxSEL value */
/* Bits defined by Linux */
#define EVENT_LINUX_MASK	\
	((EVENT_EBB_MASK  << EVENT_EBB_SHIFT)			| \
	 (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT)			| \
	 (EVENT_IFM_MASK  << EVENT_IFM_SHIFT))
#define EVENT_VALID_MASK	\
	((EVENT_THRESH_MASK    << EVENT_THRESH_SHIFT)		| \
	 (EVENT_SAMPLE_MASK    << EVENT_SAMPLE_SHIFT)		| \
	 (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT)	| \
	 (EVENT_PMC_MASK       << EVENT_PMC_SHIFT)		| \
	 (EVENT_UNIT_MASK      << EVENT_UNIT_SHIFT)		| \
	 (EVENT_COMBINE_MASK   << EVENT_COMBINE_SHIFT)		| \
	 (EVENT_MARKED_MASK    << EVENT_MARKED_SHIFT)		| \
	  EVENT_LINUX_MASK					| \
	  EVENT_PSEL_MASK)
/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1	0x0000000040000000UL
#define POWER8_MMCRA_IFM2	0x0000000080000000UL
#define POWER8_MMCRA_IFM3	0x00000000C0000000UL
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)
/*
 * Layout of constraint bits:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   fab_match   ]         [       thresh_cmp      ] [   thresh_ctl    ] [   ]
 *                                                                             |
 *                                                                 thresh_sel -*
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *               [ ] |   [ ]   [  sample ]   [     ]   [6] [5] [4] [3] [2] [1]
 *                |  |    |                     |
 *      BHRB IFM -*  |    |                     |      Count of events for each PMC.
 *            EBB -* |    |                     |        p1, p2, p3, p4, p5, p6.
 *     L1 I/D qualifier -*                      |
 *                     nc - number of counters -*
 *
 * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
 * we want the low bit of each field to be added to any existing value.
 *
 * Everything else is a value field.
 */
#define CNST_FAB_MATCH_VAL(v)	(((v) & EVENT_THR_CTL_MASK) << 56)
#define CNST_FAB_MATCH_MASK	CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)

/* We just throw all the threshold bits into the constraint */
#define CNST_THRESH_VAL(v)	(((v) & EVENT_THRESH_MASK) << 32)
#define CNST_THRESH_MASK	CNST_THRESH_VAL(EVENT_THRESH_MASK)

#define CNST_EBB_VAL(v)		(((v) & EVENT_EBB_MASK) << 24)
#define CNST_EBB_MASK		CNST_EBB_VAL(EVENT_EBB_MASK)

#define CNST_IFM_VAL(v)		(((v) & EVENT_IFM_MASK) << 25)
#define CNST_IFM_MASK		CNST_IFM_VAL(EVENT_IFM_MASK)

#define CNST_L1_QUAL_VAL(v)	(((v) & 3) << 22)
#define CNST_L1_QUAL_MASK	CNST_L1_QUAL_VAL(3)

#define CNST_SAMPLE_VAL(v)	(((v) & EVENT_SAMPLE_MASK) << 16)
#define CNST_SAMPLE_MASK	CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)
/*
 * For NC we are counting up to 4 events. This requires three bits, and we need
 * the fifth event to overflow and set the 4th bit. To achieve that we bias the
 * field by 3 in test_adder.
 */
#define CNST_NC_SHIFT		12
#define CNST_NC_VAL		(1 << CNST_NC_SHIFT)
#define CNST_NC_MASK		(8 << CNST_NC_SHIFT)
#define POWER8_TEST_ADDER	(3 << CNST_NC_SHIFT)
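/*
 * Worked example of the bias: every event ORs in CNST_NC_VAL and the adder
 * accumulates them, so after n events the NC field holds n. The constraint
 * check adds the test_adder bias of 3, giving n + 3: for n <= 4 that stays
 * below 8, but a fifth event yields exactly 8, the CNST_NC_MASK bit, and
 * the check fails.
 */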
/*
 * For the per-PMC fields we have two bits. The low bit is added, so if two
 * events ask for the same PMC the sum will overflow, setting the high bit,
 * indicating an error. So our mask sets the high bit.
 */
#define CNST_PMC_SHIFT(pmc)	((pmc - 1) * 2)
#define CNST_PMC_VAL(pmc)	(1 << CNST_PMC_SHIFT(pmc))
#define CNST_PMC_MASK(pmc)	(2 << CNST_PMC_SHIFT(pmc))
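/*
 * Worked example: two events that both request PMC1 each contribute
 * CNST_PMC_VAL(1) = 1, and the adder gives 1 + 1 = 2, which is exactly
 * CNST_PMC_MASK(1), so the overflow into the high bit is caught and the
 * two events refuse to schedule together.
 */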
/* Our add_fields is defined as: */
#define POWER8_ADD_FIELDS	\
	CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
	CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
/* Bits in MMCR1 for POWER8 */
#define MMCR1_UNIT_SHIFT(pmc)		(60 - (4 * ((pmc) - 1)))
#define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
#define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
#define MMCR1_FAB_SHIFT			36
#define MMCR1_DC_QUAL_SHIFT		47
#define MMCR1_IC_QUAL_SHIFT		46
/* Bits in MMCRA for POWER8 */
#define MMCRA_SAMP_MODE_SHIFT		1
#define MMCRA_SAMP_ELIG_SHIFT		4
#define MMCRA_THR_CTL_SHIFT		8
#define MMCRA_THR_SEL_SHIFT		16
#define MMCRA_THR_CMP_SHIFT		32
#define MMCRA_SDAR_MODE_TLB		(1ull << 42)
#define MMCRA_IFM_SHIFT			30
/* Bits in MMCR2 for POWER8 */
#define MMCR2_FCS(pmc)			(1ull << (63 - (((pmc) - 1) * 9)))
#define MMCR2_FCP(pmc)			(1ull << (62 - (((pmc) - 1) * 9)))
#define MMCR2_FCH(pmc)			(1ull << (57 - (((pmc) - 1) * 9)))
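/*
 * Worked example of the 9-bit-per-PMC layout: for PMC2 the freeze bits land
 * at MMCR2_FCS(2) = 1ull << (63 - 9), i.e. bit 54, MMCR2_FCP(2) at bit 53,
 * and MMCR2_FCH(2) at bit 48.
 */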
static inline bool event_is_fab_match(u64 event)
{
	/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
	event &= 0xff0fe;

	/* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
	return (event == 0x30056 || event == 0x4f052);
}
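/*
 * Worked example: 0xff0fe keeps the pmc, unit and pmcxsel fields but clears
 * pmcxsel bit 0 (the edge bit), so a hypothetical raw code of 0x30057 (the
 * same event with the edge bit set) masks down to 0x30056 and still matches
 * PM_MRK_FAB_RSP_MATCH.
 */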
static int power8_get_constraint(u64 event, unsigned long *maskp,
				 unsigned long *valp)
{
	unsigned int unit, pmc, cache, ebb;
	unsigned long mask, value;

	mask = value = 0;

	if (event & ~EVENT_VALID_MASK)
		return -1;

	pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
	unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
	cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
	ebb   = (event >> EVENT_EBB_SHIFT)       & EVENT_EBB_MASK;

	if (pmc) {
		u64 base_event;

		if (pmc > 6)
			return -1;

		/* Ignore Linux defined bits when checking event below */
		base_event = event & ~EVENT_LINUX_MASK;

		if (pmc >= 5 && base_event != PM_RUN_INST_CMPL &&
				base_event != PM_RUN_CYC)
			return -1;

		mask  |= CNST_PMC_MASK(pmc);
		value |= CNST_PMC_VAL(pmc);
	}

	if (pmc <= 4) {
		/*
		 * Add to number of counters in use. Note this includes events with
		 * a PMC of 0 - they still need a PMC, it's just assigned later.
		 * Don't count events on PMC 5 & 6, there is only one valid event
		 * on each of those counters, and they are handled above.
		 */
		mask  |= CNST_NC_MASK;
		value |= CNST_NC_VAL;
	}

	if (unit >= 6 && unit <= 9) {
		/*
		 * L2/L3 events contain a cache selector field, which is
		 * supposed to be programmed into MMCRC. However MMCRC is only
		 * HV writable, and there is no API for guest kernels to modify
		 * it. The solution is for the hypervisor to initialise the
		 * field to zeroes, and for us to only ever allow events that
		 * have a cache selector of zero. The bank selector (bit 3) is
		 * irrelevant, as long as the rest of the value is 0.
		 */
		if (cache & 0x7)
			return -1;

	} else if (event & EVENT_IS_L1) {
		mask  |= CNST_L1_QUAL_MASK;
		value |= CNST_L1_QUAL_VAL(cache);
	}

	if (event & EVENT_IS_MARKED) {
		mask  |= CNST_SAMPLE_MASK;
		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
	}

	/*
	 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
	 * the threshold control bits are used for the match value.
	 */
	if (event_is_fab_match(event)) {
		mask  |= CNST_FAB_MATCH_MASK;
		value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
	} else {
		/*
		 * Check the mantissa upper two bits are not zero, unless the
		 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
		 */
		unsigned int cmp, exp;

		cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
		exp = cmp >> 7;

		if (exp && (cmp & 0x60) == 0)
			return -1;

		mask  |= CNST_THRESH_MASK;
		value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
	}

	if (!pmc && ebb)
		/* EBB events must specify the PMC */
		return -1;

	if (event & EVENT_WANTS_BHRB) {
		if (!ebb)
			/* Only EBB events can request BHRB */
			return -1;

		mask  |= CNST_IFM_MASK;
		value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
	}

	/*
	 * All events must agree on EBB, either all request it or none.
	 * EBB events are pinned & exclusive, so this should never actually
	 * hit, but we leave it as a fallback in case.
	 */
	mask  |= CNST_EBB_VAL(ebb);
	value |= CNST_EBB_MASK;

	*maskp = mask;
	*valp = value;

	return 0;
}
static int power8_compute_mmcr(u64 event[], int n_ev,
			       unsigned int hwc[], unsigned long mmcr[],
			       struct perf_event *pevents[])
{
	unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}

	/* In continuous sampling mode, update SDAR on TLB miss */
	mmcra = MMCRA_SDAR_MODE_TLB;
	mmcr1 = mmcr2 = 0;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
		psel    =  event[i] & EVENT_PSEL_MASK;

		if (!pmc) {
			/* No PMC was specified, grab the first free one */
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

		if (pmc <= 4) {
			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
			mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
		}

		if (event[i] & EVENT_IS_L1) {
			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
			cache >>= 1;
			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
		}

		if (event[i] & EVENT_IS_MARKED) {
			mmcra |= MMCRA_SAMPLE_ENABLE;

			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
			if (val) {
				mmcra |= (val &  3) << MMCRA_SAMP_MODE_SHIFT;
				mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
			}
		}

		/*
		 * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (event_is_fab_match(event[i])) {
			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
			mmcra |= val << MMCRA_THR_CMP_SHIFT;
		}

		if (event[i] & EVENT_WANTS_BHRB) {
			val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
			mmcra |= val << MMCRA_IFM_SHIFT;
		}

		if (pevents[i]->attr.exclude_user)
			mmcr2 |= MMCR2_FCP(pmc);

		if (pevents[i]->attr.exclude_hv)
			mmcr2 |= MMCR2_FCH(pmc);

		if (pevents[i]->attr.exclude_kernel) {
			if (cpu_has_feature(CPU_FTR_HVMODE))
				mmcr2 |= MMCR2_FCH(pmc);
			else
				mmcr2 |= MMCR2_FCS(pmc);
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values */
	mmcr[0] = 0;

	/* pmc_inuse is 1-based */
	if (pmc_inuse & 2)
		mmcr[0] = MMCR0_PMC1CE;

	if (pmc_inuse & 0x7c)
		mmcr[0] |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr[0] |= MMCR0_FC56;

	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;
	mmcr[3] = mmcr2;

	return 0;
}
#define MAX_ALT	2

/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ PM_MRK_ST_CMPL,		PM_MRK_ST_CMPL_ALT },
	{ PM_BR_MRK_2PATH,		PM_BR_MRK_2PATH_ALT },
	{ PM_L3_CO_MEPF,		PM_L3_CO_MEPF_ALT },
	{ PM_MRK_DATA_FROM_L2MISS,	PM_MRK_DATA_FROM_L2MISS_ALT },
	{ PM_CMPLU_STALL_ALT,		PM_CMPLU_STALL },
	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
	{ PM_MRK_FILT_MATCH,		PM_MRK_FILT_MATCH_ALT },
	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
};
/*
 * Scan the alternatives table for a match and return the
 * index into the alternatives table if found, else -1.
 */
static int find_alternative(u64 event)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
		if (event < event_alternatives[i][0])
			break;

		for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
			if (event == event_alternatives[i][j])
				return i;
	}

	return -1;
}
static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int i, j, num_alt = 0;
	u64 alt_event;

	alt[num_alt++] = event;

	i = find_alternative(event);
	if (i >= 0) {
		/* Filter out the original event, it's already in alt[0] */
		for (j = 0; j < MAX_ALT; ++j) {
			alt_event = event_alternatives[i][j];
			if (alt_event && alt_event != event)
				alt[num_alt++] = alt_event;
		}
	}

	if (flags & PPMU_ONLY_COUNT_RUN) {
		/*
		 * We're only counting in RUN state, so PM_CYC is equivalent to
		 * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
		 */
		j = num_alt;
		for (i = 0; i < num_alt; ++i) {
			switch (alt[i]) {
			case PM_CYC:
				alt[j++] = PM_RUN_CYC;
				break;
			case PM_RUN_CYC:
				alt[j++] = PM_CYC;
				break;
			case PM_INST_CMPL:
				alt[j++] = PM_RUN_INST_CMPL;
				break;
			case PM_RUN_INST_CMPL:
				alt[j++] = PM_INST_CMPL;
				break;
			}
		}

		num_alt = j;
	}

	return num_alt;
}
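/*
 * Illustration (assuming PM_CYC is 0x1e, as in power8-events-list.h): a
 * request for PM_CYC with PPMU_ONLY_COUNT_RUN set returns
 * alt[] = { PM_CYC, PM_RUN_CYC }, so the event scheduler may use whichever
 * encoding fits the PMCs that are still free.
 */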
static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	if (pmc <= 3)
		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}
GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads,		PM_LD_REF_L1);

CACHE_EVENT_ATTR(L1-dcache-prefetches,		PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,		PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,		PM_IC_PREF_WRITE);

CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,			PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,		PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,		PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,			PM_L2_ST);

CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,			PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);
static struct attribute *power8_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BRU_FIN),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1),

	CACHE_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_L1_PREF),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_L3_PREF_ALL),
	CACHE_EVENT_PTR(PM_L2_ST_MISS),
	CACHE_EVENT_PTR(PM_L2_ST),

	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BRU_FIN),

	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};
static struct attribute_group power8_pmu_events_group = {
	.name = "events",
	.attrs = power8_events_attr,
};
PMU_FORMAT_ATTR(event,		"config:0-49");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");
static struct attribute *power8_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	NULL,
};
struct attribute_group power8_pmu_format_group = {
	.name = "format",
	.attrs = power8_pmu_format_attr,
};
static const struct attribute_group *power8_pmu_attr_groups[] = {
	&power8_pmu_format_group,
	&power8_pmu_events_group,
	NULL,
};
static int power8_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/* BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}
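/*
 * Illustration (assuming the usual perf tool option names): a user request
 * such as "perf record -j any_call" arrives here as
 * PERF_SAMPLE_BRANCH_ANY_CALL and is translated into the IFM1 filter above,
 * while "-j any_ret" (PERF_SAMPLE_BRANCH_ANY_RETURN) is rejected because
 * the hardware cannot filter on returns.
 */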
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS)   ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS)   ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS)   ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L2_ST,
			[ C(RESULT_MISS)   ] = PM_L2_ST_MISS,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
			[ C(RESULT_MISS)   ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
};
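/*
 * Illustration: a generic request such as "perf stat -e
 * L1-dcache-load-misses" resolves through this table to
 * power8_cache_events[C(L1D)][C(OP_READ)][C(RESULT_MISS)], i.e.
 * PM_LD_MISS_L1, while -1 entries (e.g. branch stores) mark combinations
 * that make no sense and are rejected outright.
 */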
static struct power_pmu power8_pmu = {
	.name			= "POWER8",
	.n_counter		= 6,
	.max_alternatives	= MAX_ALT + 1,
	.add_fields		= POWER8_ADD_FIELDS,
	.test_adder		= POWER8_TEST_ADDER,
	.compute_mmcr		= power8_compute_mmcr,
	.config_bhrb		= power8_config_bhrb,
	.bhrb_filter_map	= power8_bhrb_filter_map,
	.get_constraint		= power8_get_constraint,
	.get_alternatives	= power8_get_alternatives,
	.disable_pmc		= power8_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power8_generic_events),
	.generic_events		= power8_generic_events,
	.cache_events		= &power8_cache_events,
	.attr_groups		= power8_pmu_attr_groups,
	.bhrb_nr		= 32,
};
static int __init init_power8_pmu(void)
{
	int rc;

	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
		return -ENODEV;

	rc = register_power_pmu(&power8_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	if (cpu_has_feature(CPU_FTR_PMAO_BUG))
		pr_info("PMAO restore workaround active.\n");

	return 0;
}
early_initcall(init_power8_pmu);