#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses         */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses           */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system  */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts   */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled  */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches   */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses          */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches          */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]			= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]		= 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES]			= 0x00c3,
  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return -ENOENT;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return 0;
}
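
/*
 * Illustration (added, not from the original source): with
 * attr.exclude_guest = 1 the HOSTONLY bit is set in PERF_CTL, so the
 * counter counts only while the CPU is not running guest code;
 * attr.exclude_host = 1 is the mirror image via GUESTONLY. Setting both
 * would ask the hardware for HO == GO == 1, which it treats as "count
 * everywhere", hence the US/OS trick above that makes such an event
 * count nothing instead.
 */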

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}
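
/*
 * Worked example (added for illustration): the 12-bit AMD64 event code is
 * split across the event-select register, bits [7:0] in config[7:0] and
 * bits [11:8] in config[35:32]. For event 0x1D6 (see the family 15h table
 * below), config carries 0xD6 in the low byte and 0x1 in bits 35:32, and
 * amd_get_event_code() reassembles ((0x1 << 8) | 0xD6) == 0x1D6.
 * amd_is_nb_event() tests whether bits 7:5 of the low event byte are all
 * set (codes 0xEx/0xFx), e.g. the 0xE9 code used by the C(NODE) entries
 * above.
 */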

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * only care about NB events
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, Hypertransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events, this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
	int max = x86_pmu.num_counters;
	int i, j, k = -1;

	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return &unconstrained;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for (i = 0; i < max; i++) {
		/*
		 * keep track of first free slot
		 */
		if (k == -1 && !nb->owners[i])
			k = i;

		/* already present, reuse */
		if (nb->owners[i] == event)
			goto done;
	}
	/*
	 * not present, so grab a new slot
	 * starting either at:
	 */
	if (hwc->idx != -1) {
		/* previous assignment */
		i = hwc->idx;
	} else if (k != -1) {
		/* start from free slot found */
		i = k;
	} else {
		/*
		 * event not found, no slot found in
		 * first pass, try again from the
		 * beginning
		 */
		i = 0;
	}
	j = i;
	do {
		old = cmpxchg(nb->owners+i, NULL, event);
		if (!old)
			break;
		if (++i == max)
			i = 0;
	} while (i != j);
done:
	if (!old)
		return &nb->event_constraints[i];

	return &emptyconstraint;
}
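
/*
 * Worked example (illustrative, not from the original source): with four
 * counters and an empty owners table, core 0 scheduling an NB event
 * cmpxchg()s itself into nb->owners[0] and gets a constraint allowing only
 * counter 0. Core 1 scheduling its own NB event loses the race for slot 0
 * (cmpxchg returns core 0's event), rotates on, and claims slot 1. If every
 * slot is already owned, the loop wraps back around to 'j' and the empty
 * constraint is returned, so scheduling of that event fails.
 */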

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			cpuc->kfree_on_online = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}
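
/*
 * Example (illustrative): on a package whose cores all report the same
 * amd_get_nb_id(), the first core to come online keeps the amd_nb it
 * allocated in amd_pmu_cpu_prepare(); every later core finds that nb in
 * the loop above, hands its own freshly allocated copy back via
 * kfree_on_online, and bumps refcnt. The shared refcnt then lets
 * amd_pmu_cpu_dead() free the structure only when the last core goes away.
 */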

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
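
/*
 * Usage note (added for illustration): these attributes are exported under
 * /sys/bus/event_source/devices/cpu/format/ and let tools name the raw
 * config fields symbolically, e.g.:
 *
 *	perf stat -e cpu/event=0x76,umask=0x00/ -- sleep 1
 *
 * which the perf tool translates into the same config bits a raw rNNNN
 * event would set.
 */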

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.format_attrs		= amd_format_attr,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
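
/*
 * Reading aid (added for illustration): the second EVENT_CONSTRAINT()
 * argument is the allowed-counter bitmask, so amd_f15_PMC20 (0x07) means
 * PERF_CTL[2:0], amd_f15_PMC53 (0x38) means PERF_CTL[5:3], and
 * amd_f15_PMC30 (0x09) means PERF_CTL[3] or PERF_CTL[0]. PMC30 needs
 * EVENT_CONSTRAINT_OVERLAP because its mask overlaps other masks without
 * being a subset or superset of them, which the counter scheduler has to
 * handle specially.
 */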

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* not yet implemented */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

static __initconst const struct x86_pmu amd_pmu_f15h = {
	.name			= "AMD Family 15h",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_F15H_PERF_CTL,
	.perfctr		= MSR_F15H_PERF_CTR,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS_F15H,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints_f15h,
	/* northbridge counters not yet implemented: */
#if 0
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_dead		= amd_pmu_cpu_dead,
#endif
	.cpu_starting		= amd_pmu_cpu_starting,
	.format_attrs		= amd_format_attr,
};

__init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	/*
	 * If core performance counter extensions exist, it must be
	 * family 15h, otherwise fail. See x86_pmu_addr_offset().
	 */
	switch (boot_cpu_data.x86) {
	case 0x15:
		if (!cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu_f15h;
		break;
	default:
		if (cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu;
		break;
	}

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
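
/*
 * Caller note (added for illustration): these two exports exist for the
 * KVM SVM code, which is expected to call amd_pmu_enable_virt() when SVM
 * is enabled on a CPU and amd_pmu_disable_virt() when it is disabled, so
 * that the Host-only/Guest-only filtering in perf_ctr_virt_mask tracks
 * whether guest mode can actually occur.
 */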