Commit | Line | Data |
---|---|---|
e05b9b9e ME |
1 | /* |
2 | * Performance counter support for POWER8 processors. | |
3 | * | |
4 | * Copyright 2009 Paul Mackerras, IBM Corporation. | |
5 | * Copyright 2013 Michael Ellerman, IBM Corporation. | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or | |
8 | * modify it under the terms of the GNU General Public License | |
9 | * as published by the Free Software Foundation; either version | |
10 | * 2 of the License, or (at your option) any later version. | |
11 | */ | |
12 | ||
c2e37a26 ME |
13 | #define pr_fmt(fmt) "power8-pmu: " fmt |
14 | ||
e05b9b9e ME |
15 | #include <linux/kernel.h> |
16 | #include <linux/perf_event.h> | |
17 | #include <asm/firmware.h> | |
9de5cb0f | 18 | #include <asm/cputable.h> |
e05b9b9e | 19 | |
e05b9b9e ME |
20 | /* |
21 | * Some power8 event codes. | |
22 | */ | |
e0728b50 SB |
23 | #define EVENT(_name, _code) _name = _code, |
24 | ||
25 | enum { | |
26 | #include "power8-events-list.h" | |
27 | }; | |
2fdd313f | 28 | |
e0728b50 | 29 | #undef EVENT |
e05b9b9e ME |
30 | |
31 | /* | |
32 | * Raw event encoding for POWER8: | |
33 | * | |
34 | * 60 56 52 48 44 40 36 32 | |
35 | * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | | |
ba969237 ME |
36 | * | | [ ] [ thresh_cmp ] [ thresh_ctl ] |
37 | * | | | | | |
38 | * | | *- IFM (Linux) thresh start/stop OR FAB match -* | |
39 | * | *- BHRB (Linux) | |
40 | * *- EBB (Linux) | |
e05b9b9e ME |
41 | * |
42 | * 28 24 20 16 12 8 4 0 | |
43 | * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | | |
44 | * [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ] | |
45 | * | | | | | | |
46 | * | | | | *- mark | |
47 | * | | *- L1/L2/L3 cache_sel | | |
48 | * | | | | |
49 | * | *- sampling mode for marked events *- combine | |
50 | * | | |
51 | * *- thresh_sel | |
52 | * | |
53 | * Below uses IBM bit numbering. | |
54 | * | |
55 | * MMCR1[x:y] = unit (PMCxUNIT) | |
56 | * MMCR1[x] = combine (PMCxCOMB) | |
57 | * | |
58 | * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011 | |
59 | * # PM_MRK_FAB_RSP_MATCH | |
60 | * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH) | |
61 | * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001 | |
62 | * # PM_MRK_FAB_RSP_MATCH_CYC | |
63 | * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH) | |
64 | * else | |
65 | * MMCRA[48:55] = thresh_ctl (THRESH START/END) | |
66 | * | |
67 | * if thresh_sel: | |
68 | * MMCRA[45:47] = thresh_sel | |
69 | * | |
70 | * if thresh_cmp: | |
71 | * MMCRA[22:24] = thresh_cmp[0:2] | |
72 | * MMCRA[25:31] = thresh_cmp[3:9] | |
73 | * | |
74 | * if unit == 6 or unit == 7 | |
75 | * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL) | |
76 | * else if unit == 8 or unit == 9: | |
77 | * if cache_sel[0] == 0: # L3 bank | |
78 | * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0) | |
79 | * else if cache_sel[0] == 1: | |
80 | * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1) | |
81 | * else if cache_sel[1]: # L1 event | |
82 | * MMCR1[16] = cache_sel[2] | |
83 | * MMCR1[17] = cache_sel[3] | |
84 | * | |
85 | * if mark: | |
86 | * MMCRA[63] = 1 (SAMPLE_ENABLE) | |
87 | * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG) | |
88 | * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE) | |
ba969237 ME |
89 | * |
90 | * if EBB and BHRB: | |
91 | * MMCRA[32:33] = IFM | |
e05b9b9e ME |
92 | * |
93 | */ | |
94 | ||
4df48999 | 95 | #define EVENT_EBB_MASK 1ull |
fb568d76 | 96 | #define EVENT_EBB_SHIFT PERF_EVENT_CONFIG_EBB_SHIFT |
ba969237 ME |
97 | #define EVENT_BHRB_MASK 1ull |
98 | #define EVENT_BHRB_SHIFT 62 | |
99 | #define EVENT_WANTS_BHRB (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT) | |
100 | #define EVENT_IFM_MASK 3ull | |
101 | #define EVENT_IFM_SHIFT 60 | |
e05b9b9e ME |
102 | #define EVENT_THR_CMP_SHIFT 40 /* Threshold CMP value */ |
103 | #define EVENT_THR_CMP_MASK 0x3ff | |
104 | #define EVENT_THR_CTL_SHIFT 32 /* Threshold control value (start/stop) */ | |
105 | #define EVENT_THR_CTL_MASK 0xffull | |
106 | #define EVENT_THR_SEL_SHIFT 29 /* Threshold select value */ | |
107 | #define EVENT_THR_SEL_MASK 0x7 | |
108 | #define EVENT_THRESH_SHIFT 29 /* All threshold bits */ | |
109 | #define EVENT_THRESH_MASK 0x1fffffull | |
110 | #define EVENT_SAMPLE_SHIFT 24 /* Sampling mode & eligibility */ | |
111 | #define EVENT_SAMPLE_MASK 0x1f | |
112 | #define EVENT_CACHE_SEL_SHIFT 20 /* L2/L3 cache select */ | |
113 | #define EVENT_CACHE_SEL_MASK 0xf | |
114 | #define EVENT_IS_L1 (4 << EVENT_CACHE_SEL_SHIFT) | |
115 | #define EVENT_PMC_SHIFT 16 /* PMC number (1-based) */ | |
116 | #define EVENT_PMC_MASK 0xf | |
117 | #define EVENT_UNIT_SHIFT 12 /* Unit */ | |
118 | #define EVENT_UNIT_MASK 0xf | |
119 | #define EVENT_COMBINE_SHIFT 11 /* Combine bit */ | |
120 | #define EVENT_COMBINE_MASK 0x1 | |
121 | #define EVENT_MARKED_SHIFT 8 /* Marked bit */ | |
122 | #define EVENT_MARKED_MASK 0x1 | |
123 | #define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | |
124 | #define EVENT_PSEL_MASK 0xff /* PMCxSEL value */ | |
125 | ||
ba969237 ME |
126 | /* Bits defined by Linux */ |
127 | #define EVENT_LINUX_MASK \ | |
128 | ((EVENT_EBB_MASK << EVENT_EBB_SHIFT) | \ | |
129 | (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT) | \ | |
130 | (EVENT_IFM_MASK << EVENT_IFM_SHIFT)) | |
131 | ||
d8bec4c9 ME |
132 | #define EVENT_VALID_MASK \ |
133 | ((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \ | |
134 | (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \ | |
135 | (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \ | |
136 | (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \ | |
137 | (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \ | |
138 | (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \ | |
139 | (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \ | |
ba969237 | 140 | EVENT_LINUX_MASK | \ |
d8bec4c9 ME |
141 | EVENT_PSEL_MASK) |
142 | ||
b1113557 AK |
143 | /* MMCRA IFM bits - POWER8 */ |
144 | #define POWER8_MMCRA_IFM1 0x0000000040000000UL | |
145 | #define POWER8_MMCRA_IFM2 0x0000000080000000UL | |
146 | #define POWER8_MMCRA_IFM3 0x00000000C0000000UL | |
147 | ||
148 | #define ONLY_PLM \ | |
149 | (PERF_SAMPLE_BRANCH_USER |\ | |
150 | PERF_SAMPLE_BRANCH_KERNEL |\ | |
151 | PERF_SAMPLE_BRANCH_HV) | |
152 | ||
e05b9b9e ME |
153 | /* |
154 | * Layout of constraint bits: | |
155 | * | |
156 | * 60 56 52 48 44 40 36 32 | |
157 | * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | | |
158 | * [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ] | |
159 | * | | |
160 | * thresh_sel -* | |
161 | * | |
162 | * 28 24 20 16 12 8 4 0 | |
163 | * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | | |
ba969237 ME |
164 | * [ ] | [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1] |
165 | * | | | | | |
166 | * BHRB IFM -* | | | Count of events for each PMC. | |
167 | * EBB -* | | p1, p2, p3, p4, p5, p6. | |
168 | * L1 I/D qualifier -* | | |
e05b9b9e ME |
169 | * nc - number of counters -* |
170 | * | |
171 | * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints | |
172 | * we want the low bit of each field to be added to any existing value. | |
173 | * | |
174 | * Everything else is a value field. | |
175 | */ | |
176 | ||
177 | #define CNST_FAB_MATCH_VAL(v) (((v) & EVENT_THR_CTL_MASK) << 56) | |
178 | #define CNST_FAB_MATCH_MASK CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK) | |
179 | ||
180 | /* We just throw all the threshold bits into the constraint */ | |
181 | #define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32) | |
182 | #define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK) | |
183 | ||
4df48999 ME |
184 | #define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24) |
185 | #define CNST_EBB_MASK CNST_EBB_VAL(EVENT_EBB_MASK) | |
186 | ||
ba969237 ME |
187 | #define CNST_IFM_VAL(v) (((v) & EVENT_IFM_MASK) << 25) |
188 | #define CNST_IFM_MASK CNST_IFM_VAL(EVENT_IFM_MASK) | |
189 | ||
e05b9b9e ME |
190 | #define CNST_L1_QUAL_VAL(v) (((v) & 3) << 22) |
191 | #define CNST_L1_QUAL_MASK CNST_L1_QUAL_VAL(3) | |
192 | ||
193 | #define CNST_SAMPLE_VAL(v) (((v) & EVENT_SAMPLE_MASK) << 16) | |
194 | #define CNST_SAMPLE_MASK CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK) | |
195 | ||
196 | /* | |
197 | * For NC we are counting up to 4 events. This requires three bits, and we need | |
198 | * the fifth event to overflow and set the 4th bit. To achieve that we bias the | |
199 | * fields by 3 in test_adder. | |
200 | */ | |
201 | #define CNST_NC_SHIFT 12 | |
202 | #define CNST_NC_VAL (1 << CNST_NC_SHIFT) | |
203 | #define CNST_NC_MASK (8 << CNST_NC_SHIFT) | |
204 | #define POWER8_TEST_ADDER (3 << CNST_NC_SHIFT) | |
205 | ||
206 | /* | |
207 | * For the per-PMC fields we have two bits. The low bit is added, so if two | |
208 | * events ask for the same PMC the sum will overflow, setting the high bit, | |
209 | * indicating an error. So our mask sets the high bit. | |
210 | */ | |
211 | #define CNST_PMC_SHIFT(pmc) ((pmc - 1) * 2) | |
212 | #define CNST_PMC_VAL(pmc) (1 << CNST_PMC_SHIFT(pmc)) | |
213 | #define CNST_PMC_MASK(pmc) (2 << CNST_PMC_SHIFT(pmc)) | |
214 | ||
215 | /* Our add_fields is defined as: */ | |
216 | #define POWER8_ADD_FIELDS \ | |
217 | CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \ | |
218 | CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL | |
219 | ||
220 | ||
221 | /* Bits in MMCR1 for POWER8 */ | |
222 | #define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1))) | |
223 | #define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1)) | |
224 | #define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8) | |
a53b27b3 | 225 | #define MMCR1_FAB_SHIFT 36 |
e05b9b9e ME |
226 | #define MMCR1_DC_QUAL_SHIFT 47 |
227 | #define MMCR1_IC_QUAL_SHIFT 46 | |
228 | ||
229 | /* Bits in MMCRA for POWER8 */ | |
230 | #define MMCRA_SAMP_MODE_SHIFT 1 | |
231 | #define MMCRA_SAMP_ELIG_SHIFT 4 | |
232 | #define MMCRA_THR_CTL_SHIFT 8 | |
233 | #define MMCRA_THR_SEL_SHIFT 16 | |
234 | #define MMCRA_THR_CMP_SHIFT 32 | |
235 | #define MMCRA_SDAR_MODE_TLB (1ull << 42) | |
ba969237 | 236 | #define MMCRA_IFM_SHIFT 30 |
e05b9b9e | 237 | |
9de5cb0f ME |
238 | /* Bits in MMCR2 for POWER8 */ |
239 | #define MMCR2_FCS(pmc) (1ull << (63 - (((pmc) - 1) * 9))) | |
240 | #define MMCR2_FCP(pmc) (1ull << (62 - (((pmc) - 1) * 9))) | |
241 | #define MMCR2_FCH(pmc) (1ull << (57 - (((pmc) - 1) * 9))) | |
242 | ||
e05b9b9e ME |
243 | |
244 | static inline bool event_is_fab_match(u64 event) | |
245 | { | |
246 | /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */ | |
247 | event &= 0xff0fe; | |
248 | ||
249 | /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */ | |
250 | return (event == 0x30056 || event == 0x4f052); | |
251 | } | |
252 | ||
/*
 * Validate an event's raw encoding and translate it into a (mask, value)
 * constraint pair for the core book3s constraint solver.
 *
 * @event: raw event code, laid out as documented at the top of this file.
 * @maskp/@valp: out params, filled in only on success.
 *
 * Returns 0 on success, -1 if the encoding is invalid or unschedulable.
 */
static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
	unsigned int unit, pmc, cache, ebb;
	unsigned long mask, value;

	mask = value = 0;

	/* Reject events with bits set outside the defined encoding */
	if (event & ~EVENT_VALID_MASK)
		return -1;

	pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
	unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
	cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
	ebb   = (event >> EVENT_EBB_SHIFT)       & EVENT_EBB_MASK;

	if (pmc) {
		u64 base_event;

		if (pmc > 6)
			return -1;

		/* Ignore Linux defined bits when checking event below */
		base_event = event & ~EVENT_LINUX_MASK;

		/* PMCs 5 & 6 only count the run-latch events */
		if (pmc >= 5 && base_event != PM_RUN_INST_CMPL &&
				base_event != PM_RUN_CYC)
			return -1;

		mask  |= CNST_PMC_MASK(pmc);
		value |= CNST_PMC_VAL(pmc);
	}

	if (pmc <= 4) {
		/*
		 * Add to number of counters in use. Note this includes events with
		 * a PMC of 0 - they still need a PMC, it's just assigned later.
		 * Don't count events on PMC 5 & 6, there is only one valid event
		 * on each of those counters, and they are handled above.
		 */
		mask  |= CNST_NC_MASK;
		value |= CNST_NC_VAL;
	}

	if (unit >= 6 && unit <= 9) {
		/*
		 * L2/L3 events contain a cache selector field, which is
		 * supposed to be programmed into MMCRC. However MMCRC is only
		 * HV writable, and there is no API for guest kernels to modify
		 * it. The solution is for the hypervisor to initialise the
		 * field to zeroes, and for us to only ever allow events that
		 * have a cache selector of zero. The bank selector (bit 3) is
		 * irrelevant, as long as the rest of the value is 0.
		 */
		if (cache & 0x7)
			return -1;

	} else if (event & EVENT_IS_L1) {
		mask  |= CNST_L1_QUAL_MASK;
		value |= CNST_L1_QUAL_VAL(cache);
	}

	if (event & EVENT_IS_MARKED) {
		/* Marked events must agree on the sampling mode */
		mask  |= CNST_SAMPLE_MASK;
		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
	}

	/*
	 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
	 * the threshold control bits are used for the match value.
	 */
	if (event_is_fab_match(event)) {
		mask  |= CNST_FAB_MATCH_MASK;
		value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
	} else {
		/*
		 * Check the mantissa upper two bits are not zero, unless the
		 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
		 */
		unsigned int cmp, exp;

		cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
		exp = cmp >> 7;

		if (exp && (cmp & 0x60) == 0)
			return -1;

		mask  |= CNST_THRESH_MASK;
		value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
	}

	if (!pmc && ebb)
		/* EBB events must specify the PMC */
		return -1;

	if (event & EVENT_WANTS_BHRB) {
		if (!ebb)
			/* Only EBB events can request BHRB */
			return -1;

		mask  |= CNST_IFM_MASK;
		value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
	}

	/*
	 * All events must agree on EBB, either all request it or none.
	 * EBB events are pinned & exclusive, so this should never actually
	 * hit, but we leave it as a fallback in case.
	 *
	 * NOTE(review): VAL/MASK appear transposed relative to the other
	 * fields above, but this matches the core constraint machinery's
	 * expectations - confirm against core-book3s before "fixing".
	 */
	mask  |= CNST_EBB_VAL(ebb);
	value |= CNST_EBB_MASK;

	*maskp = mask;
	*valp = value;

	return 0;
}
369 | ||
/*
 * Assign each of the @n_ev events to a PMC and build the PMU register
 * images. On return:
 *   mmcr[0] = MMCR0, mmcr[1] = MMCR1, mmcr[2] = MMCRA, mmcr[3] = MMCR2
 *   hwc[i]  = zero-based PMC number assigned to event i
 *
 * The events are assumed to have already passed constraint checking,
 * so PMC assignment cannot fail here; always returns 0.
 */
static int power8_compute_mmcr(u64 event[], int n_ev,
			       unsigned int hwc[], unsigned long mmcr[],
			       struct perf_event *pevents[])
{
	unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}

	/* In continuous sampling mode, update SDAR on TLB miss */
	mmcra = MMCRA_SDAR_MODE_TLB;
	mmcr1 = mmcr2 = 0;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
		psel    =  event[i] & EVENT_PSEL_MASK;

		if (!pmc) {
			/* No PMC requested: grab the lowest free one of 1-4 */
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

		/* Unit/combine/psel fields only exist for PMCs 1-4 */
		if (pmc <= 4) {
			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
			mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
		}

		if (event[i] & EVENT_IS_L1) {
			/* cache_sel low bit -> IC qualifier, next bit -> DC qualifier */
			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
			cache >>= 1;
			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
		}

		if (event[i] & EVENT_IS_MARKED) {
			mmcra |= MMCRA_SAMPLE_ENABLE;

			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
			if (val) {
				mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
				mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
			}
		}

		/*
		 * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (event_is_fab_match(event[i])) {
			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
			mmcra |= val << MMCRA_THR_CMP_SHIFT;
		}

		if (event[i] & EVENT_WANTS_BHRB) {
			val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
			mmcra |= val << MMCRA_IFM_SHIFT;
		}

		/* Per-PMC freeze bits in MMCR2 implement the exclude_* flags */
		if (pevents[i]->attr.exclude_user)
			mmcr2 |= MMCR2_FCP(pmc);

		if (pevents[i]->attr.exclude_hv)
			mmcr2 |= MMCR2_FCH(pmc);

		if (pevents[i]->attr.exclude_kernel) {
			/*
			 * The kernel runs in hypervisor state when the CPU has
			 * HV mode, otherwise in supervisor state; freeze the
			 * state the kernel actually executes in.
			 */
			if (cpu_has_feature(CPU_FTR_HVMODE))
				mmcr2 |= MMCR2_FCH(pmc);
			else
				mmcr2 |= MMCR2_FCS(pmc);
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values */
	mmcr[0] = 0;

	/* pmc_inuse is 1-based */
	if (pmc_inuse & 2)
		mmcr[0] = MMCR0_PMC1CE;

	if (pmc_inuse & 0x7c)
		mmcr[0] |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr[0] |= MMCR0_FC56;

	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;
	mmcr[3] = mmcr2;

	return 0;
}
487 | ||
/* Maximum number of alternative codes per event (table column count) */
#define MAX_ALT	2

/*
 * Table of alternatives, sorted by column 0.
 * NOTE: find_alternative() relies on the ascending sort of the first
 * column to terminate its scan early - keep new rows in order.
 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ PM_MRK_ST_CMPL,		PM_MRK_ST_CMPL_ALT },
	{ PM_BR_MRK_2PATH,		PM_BR_MRK_2PATH_ALT },
	{ PM_L3_CO_MEPF,		PM_L3_CO_MEPF_ALT },
	{ PM_MRK_DATA_FROM_L2MISS,	PM_MRK_DATA_FROM_L2MISS_ALT },
	{ PM_CMPLU_STALL_ALT,		PM_CMPLU_STALL },
	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
	{ PM_MRK_FILT_MATCH,		PM_MRK_FILT_MATCH_ALT },
	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
};
504 | ||
505 | /* | |
506 | * Scan the alternatives table for a match and return the | |
507 | * index into the alternatives table if found, else -1. | |
508 | */ | |
509 | static int find_alternative(u64 event) | |
510 | { | |
511 | int i, j; | |
512 | ||
513 | for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { | |
514 | if (event < event_alternatives[i][0]) | |
515 | break; | |
516 | ||
517 | for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) | |
518 | if (event == event_alternatives[i][j]) | |
519 | return i; | |
520 | } | |
521 | ||
522 | return -1; | |
523 | } | |
524 | ||
525 | static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[]) | |
526 | { | |
527 | int i, j, num_alt = 0; | |
528 | u64 alt_event; | |
529 | ||
530 | alt[num_alt++] = event; | |
531 | ||
532 | i = find_alternative(event); | |
533 | if (i >= 0) { | |
534 | /* Filter out the original event, it's already in alt[0] */ | |
535 | for (j = 0; j < MAX_ALT; ++j) { | |
536 | alt_event = event_alternatives[i][j]; | |
537 | if (alt_event && alt_event != event) | |
538 | alt[num_alt++] = alt_event; | |
539 | } | |
540 | } | |
541 | ||
542 | if (flags & PPMU_ONLY_COUNT_RUN) { | |
543 | /* | |
544 | * We're only counting in RUN state, so PM_CYC is equivalent to | |
545 | * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL. | |
546 | */ | |
547 | j = num_alt; | |
548 | for (i = 0; i < num_alt; ++i) { | |
549 | switch (alt[i]) { | |
5bcca743 MS |
550 | case PM_CYC: |
551 | alt[j++] = PM_RUN_CYC; | |
e05b9b9e | 552 | break; |
5bcca743 MS |
553 | case PM_RUN_CYC: |
554 | alt[j++] = PM_CYC; | |
e05b9b9e | 555 | break; |
5bcca743 MS |
556 | case PM_INST_CMPL: |
557 | alt[j++] = PM_RUN_INST_CMPL; | |
e05b9b9e | 558 | break; |
5bcca743 MS |
559 | case PM_RUN_INST_CMPL: |
560 | alt[j++] = PM_INST_CMPL; | |
e05b9b9e ME |
561 | break; |
562 | } | |
563 | } | |
564 | num_alt = j; | |
565 | } | |
566 | ||
567 | return num_alt; | |
568 | } | |
569 | ||
/*
 * Stop @pmc (zero-based) counting by clearing its PMCSEL field in the
 * MMCR1 image. PMCs 5 and 6 (pmc > 3) have no PMCSEL field to clear.
 */
static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	if (pmc > 3)
		return;

	mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}
575 | ||
e0728b50 SB |
/*
 * Sysfs event attributes: expose the mapping from generic perf event
 * names and generalized cache event names to POWER8 raw codes under
 * /sys/bus/event_source/devices/<pmu>/events.
 */
GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1);

CACHE_EVENT_ATTR(L1-dcache-load-misses,	PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads,	PM_LD_REF_L1);

CACHE_EVENT_ATTR(L1-dcache-prefetches,	PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,	PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,	PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,	PM_IC_PREF_WRITE);

CACHE_EVENT_ATTR(LLC-load-misses,	PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,		PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,	PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,	PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,		PM_L2_ST);

CACHE_EVENT_ATTR(branch-load-misses,	PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,		PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses,	PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,	PM_ITLB_MISS);

/* Entries must correspond to the GENERIC/CACHE_EVENT_ATTRs defined above */
static struct attribute *power8_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BRU_FIN),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1),

	CACHE_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_L1_PREF),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_L3_PREF_ALL),
	CACHE_EVENT_PTR(PM_L2_ST_MISS),
	CACHE_EVENT_PTR(PM_L2_ST),

	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BRU_FIN),

	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};

static struct attribute_group power8_pmu_events_group = {
	.name = "events",
	.attrs = power8_events_attr,
};
640 | ||
e05b9b9e ME |
/*
 * Sysfs "format" attributes: describe which perf_event_attr.config bits
 * hold each field of the raw event encoding (see the layout comment at
 * the top of this file), so tools can build raw events symbolically.
 */
PMU_FORMAT_ATTR(event,		"config:0-49");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");

static struct attribute *power8_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	NULL,
};
669 | ||
670 | struct attribute_group power8_pmu_format_group = { | |
671 | .name = "format", | |
672 | .attrs = power8_pmu_format_attr, | |
673 | }; | |
674 | ||
/* All sysfs attribute groups exported by this PMU (format + events) */
static const struct attribute_group *power8_pmu_attr_groups[] = {
	&power8_pmu_format_group,
	&power8_pmu_events_group,
	NULL,
};
680 | ||
/* Map the generic perf hardware event ids to POWER8 raw event codes */
static int power8_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};
691 | ||
b1113557 AK |
692 | static u64 power8_bhrb_filter_map(u64 branch_sample_type) |
693 | { | |
694 | u64 pmu_bhrb_filter = 0; | |
b1113557 | 695 | |
7689bdca | 696 | /* BHRB and regular PMU events share the same privilege state |
b1113557 | 697 | * filter configuration. BHRB is always recorded along with a |
7689bdca AK |
698 | * regular PMU event. As the privilege state filter is handled |
699 | * in the basic PMC configuration of the accompanying regular | |
700 | * PMU event, we ignore any separate BHRB specific request. | |
b1113557 | 701 | */ |
b1113557 AK |
702 | |
703 | /* No branch filter requested */ | |
704 | if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY) | |
705 | return pmu_bhrb_filter; | |
706 | ||
707 | /* Invalid branch filter options - HW does not support */ | |
708 | if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN) | |
709 | return -1; | |
710 | ||
711 | if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL) | |
712 | return -1; | |
713 | ||
24f1a79a SE |
714 | if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL) |
715 | return -1; | |
716 | ||
b1113557 AK |
717 | if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) { |
718 | pmu_bhrb_filter |= POWER8_MMCRA_IFM1; | |
719 | return pmu_bhrb_filter; | |
720 | } | |
721 | ||
722 | /* Every thing else is unsupported */ | |
723 | return -1; | |
724 | } | |
725 | ||
726 | static void power8_config_bhrb(u64 pmu_bhrb_filter) | |
727 | { | |
728 | /* Enable BHRB filter in PMU */ | |
729 | mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); | |
730 | } | |
731 | ||
2fdd313f ME |
#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 *
 * Indexed [cache-level][operation][result]. Note the LL (last-level)
 * row mixes sources: reads use L3 data-source events while writes use
 * L2 store events.
 */
static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS) ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L2_ST,
			[ C(RESULT_MISS) ] = PM_L2_ST_MISS,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
			[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
};

#undef C
841 | ||
e05b9b9e ME |
/* POWER8 PMU descriptor, registered with the core book3s perf code */
static struct power_pmu power8_pmu = {
	.name			= "POWER8",
	.n_counter		= 6,		/* PMCs 1-6 */
	.max_alternatives	= MAX_ALT + 1,	/* event itself + table row */
	.add_fields		= POWER8_ADD_FIELDS,
	.test_adder		= POWER8_TEST_ADDER,
	.compute_mmcr		= power8_compute_mmcr,
	.config_bhrb		= power8_config_bhrb,
	.bhrb_filter_map	= power8_bhrb_filter_map,
	.get_constraint		= power8_get_constraint,
	.get_alternatives	= power8_get_alternatives,
	.disable_pmc		= power8_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power8_generic_events),
	.generic_events		= power8_generic_events,
	.cache_events		= &power8_cache_events,
	.attr_groups		= power8_pmu_attr_groups,
	.bhrb_nr		= 32,		/* BHRB entry count */
};
861 | ||
862 | static int __init init_power8_pmu(void) | |
863 | { | |
5d7ead00 ME |
864 | int rc; |
865 | ||
e05b9b9e ME |
866 | if (!cur_cpu_spec->oprofile_cpu_type || |
867 | strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8")) | |
868 | return -ENODEV; | |
869 | ||
5d7ead00 ME |
870 | rc = register_power_pmu(&power8_pmu); |
871 | if (rc) | |
872 | return rc; | |
873 | ||
874 | /* Tell userspace that EBB is supported */ | |
875 | cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB; | |
876 | ||
c2e37a26 ME |
877 | if (cpu_has_feature(CPU_FTR_PMAO_BUG)) |
878 | pr_info("PMAO restore workaround active.\n"); | |
879 | ||
5d7ead00 | 880 | return 0; |
e05b9b9e ME |
881 | } |
882 | early_initcall(init_power8_pmu); |