perf/x86-ibs: Take instruction pointer from ibs sample
arch/x86/kernel/cpu/perf_event_amd.c
#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"

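/*
 * Mapping of the generic hardware cache events to AMD native event codes.
 * A value of 0 marks a combination that is not supported, -1 one that does
 * not make sense.
 */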
static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]			= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]		= 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES]			= 0x00c3,
  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
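/*
 * The 12-bit event code is split across the event-select register: bits
 * [7:0] hold the low byte and bits [35:32] the high nibble, matching the
 * "config:0-7,32-35" format attribute defined further down.
 */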
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
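/*
 * NB events sit at the top of the event-code space: the test below matches
 * events whose low event-code byte is in the 0xe0-0xff range (see the
 * NorthBridge comment above amd_get_event_constraints()).
 */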
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * only care about NB events
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}

 /*
  * AMD64 NorthBridge events need special treatment because
  * counter access needs to be synchronized across all cores
  * of a package. Refer to BKDG section 3.12
  *
  * NB events are events measuring L3 cache, Hypertransport
  * traffic. They are identified by an event code >= 0xe00.
  * They measure events on the NorthBridge which is shared
  * by all cores on a package. NB events are counted on a
  * shared set of counters. When a NB event is programmed
  * in a counter, the data actually comes from a shared
  * counter. Thus, access to those counters needs to be
  * synchronized.
  *
  * We implement the synchronization such that no two cores
  * can be measuring NB events using the same counters. Thus,
  * we maintain a per-NB allocation table. The available slot
  * is propagated using the event_constraint structure.
  *
  * We provide only one choice for each NB event based on
  * the fact that only NB events have restrictions. Consequently,
  * if a counter is available, there is a guarantee the NB event
  * will be assigned to it. If no slot is available, an empty
  * constraint is returned and scheduling will eventually fail
  * for this event.
  *
  * Note that all cores attached to the same NB compete for the same
  * counters to host NB events, this is why we use atomic ops. Some
  * multi-chip CPUs may have more than one NB.
  *
  * Given that resources are allocated (cmpxchg), they must be
  * eventually freed for others to use. This is accomplished by
  * calling amd_put_event_constraints().
  *
  * Non NB events are not impacted by this restriction.
  */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
	int max = x86_pmu.num_counters;
	int i, j, k = -1;

	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return &unconstrained;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for (i = 0; i < max; i++) {
		/*
		 * keep track of first free slot
		 */
		if (k == -1 && !nb->owners[i])
			k = i;

		/* already present, reuse */
		if (nb->owners[i] == event)
			goto done;
	}
	/*
	 * not present, so grab a new slot
	 * starting either at:
	 */
	if (hwc->idx != -1) {
		/* previous assignment */
		i = hwc->idx;
	} else if (k != -1) {
		/* start from free slot found */
		i = k;
	} else {
		/*
		 * event not found, no slot found in
		 * first pass, try again from the
		 * beginning
		 */
		i = 0;
	}
	j = i;
	do {
		old = cmpxchg(nb->owners+i, NULL, event);
		if (!old)
			break;
		if (++i == max)
			i = 0;
	} while (i != j);
done:
	if (!old)
		return &nb->event_constraints[i];

	return &emptyconstraint;
}

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

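/*
 * When a CPU comes online, either adopt the amd_nb already published by
 * another core on the same northbridge (queueing our pre-allocated copy
 * for freeing) or publish our own, and take a reference either way.
 */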
static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			cpuc->kfree_on_online = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

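/*
 * Format attributes describing the raw config layout; the perf core exposes
 * these through the PMU's sysfs "format" directory.
 */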
405PMU_FORMAT_ATTR(event, "config:0-7,32-35");
406PMU_FORMAT_ATTR(umask, "config:8-15" );
407PMU_FORMAT_ATTR(edge, "config:18" );
408PMU_FORMAT_ATTR(inv, "config:23" );
409PMU_FORMAT_ATTR(cmask, "config:24-31" );
410
411static struct attribute *amd_format_attr[] = {
412 &format_attr_event.attr,
413 &format_attr_umask.attr,
414 &format_attr_edge.attr,
415 &format_attr_inv.attr,
416 &format_attr_cmask.attr,
417 NULL,
418};
419
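/*
 * PMU description used when the core performance counter extension is not
 * available: the four legacy K7-style counter/event-select MSRs.
 */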
static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.format_attrs		= amd_format_attr,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */

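/*
 * Counter masks for the family 15h constraints below: bit i in the mask
 * allows PERF_CTL[i], so 0x07 covers counters 0-2, 0x38 counters 3-5, etc.
 */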
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* not yet implemented */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

static __initconst const struct x86_pmu amd_pmu_f15h = {
	.name			= "AMD Family 15h",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_F15H_PERF_CTL,
	.perfctr		= MSR_F15H_PERF_CTR,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS_F15H,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints_f15h,
	/* northbridge counters not yet implemented: */
#if 0
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_dead		= amd_pmu_cpu_dead,
#endif
	.cpu_starting		= amd_pmu_cpu_starting,
	.format_attrs		= amd_format_attr,
};

__init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	/*
	 * If core performance counter extensions exist, it must be
	 * family 15h, otherwise fail. See x86_pmu_addr_offset().
	 */
	switch (boot_cpu_data.x86) {
	case 0x15:
		if (!cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu_f15h;
		break;
	default:
		if (cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu;
		break;
	}

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

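/*
 * amd_pmu_enable_virt()/amd_pmu_disable_virt() are exported for the SVM
 * (KVM) host side: they flip whether the Host-only filter is forced via
 * perf_ctr_virt_mask and then reprogram all counters.
 */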
void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);