perf/x86: Implement IBS pmu control ops
deliverable/linux.git: arch/x86/kernel/cpu/perf_event_amd_ibs.c
/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/apic.h>

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>

#define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT

enum ibs_states {
        IBS_ENABLED = 0,
        IBS_STARTED = 1,
        IBS_STOPPING = 2,

        IBS_MAX_STATES,
};

struct cpu_perf_ibs {
        struct perf_event *event;
        unsigned long state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
        struct pmu pmu;
        unsigned int msr;
        u64 config_mask;
        u64 cnt_mask;
        u64 enable_mask;
        u64 valid_mask;
        unsigned long offset_mask[1];
        int offset_max;
        struct cpu_perf_ibs __percpu *pcpu;
};

struct perf_ibs_data {
        u32 size;
        union {
                u32 data[0];    /* data buffer starts here */
                u32 caps;
        };
        u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};

static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
        if (perf_ibs_fetch.pmu.type == type)
                return &perf_ibs_fetch;
        if (perf_ibs_op.pmu.type == type)
                return &perf_ibs_op;
        return NULL;
}

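/*
 * An IBS event is set up from the perf_event_attr: attr.type must match
 * one of the two dynamically registered IBS PMUs, and the sample period
 * is translated into the hardware MaxCnt field.  The MaxCnt bits hold
 * the period shifted right by four, so the period must be a multiple of
 * 16; alternatively the MaxCnt bits may be passed directly in
 * attr.config, in which case the effective sample period is derived
 * from them.
 */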
static int perf_ibs_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs;
        u64 max_cnt, config;

        perf_ibs = get_ibs_pmu(event->attr.type);
        if (!perf_ibs)
                return -ENOENT;

        config = event->attr.config;
        if (config & ~perf_ibs->config_mask)
                return -EINVAL;

        if (hwc->sample_period) {
                if (config & perf_ibs->cnt_mask)
                        /* raw max_cnt may not be set */
                        return -EINVAL;
                if (hwc->sample_period & 0x0f)
                        /* lower 4 bits can not be set in ibs max cnt */
                        return -EINVAL;
                max_cnt = hwc->sample_period >> 4;
                if (max_cnt & ~perf_ibs->cnt_mask)
                        /* out of range */
                        return -EINVAL;
                config |= max_cnt;
        } else {
                max_cnt = config & perf_ibs->cnt_mask;
                event->attr.sample_period = max_cnt << 4;
                hwc->sample_period = event->attr.sample_period;
        }

        if (!max_cnt)
                return -EINVAL;

        hwc->config_base = perf_ibs->msr;
        hwc->config = config;

        return 0;
}

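/*
 * Per-cpu start/stop state: IBS_STARTED tracks whether the enable bit
 * in the control MSR is set.  IBS_STOPPING is set on stop so that the
 * NMI handler can recognize and consume an interrupt that was already
 * in flight when the enable bit was cleared.
 */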
static void perf_ibs_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (test_and_set_bit(IBS_STARTED, pcpu->state))
                return;

        wrmsrl(hwc->config_base, hwc->config | perf_ibs->enable_mask);
}

static void perf_ibs_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        u64 val;

        if (!test_and_clear_bit(IBS_STARTED, pcpu->state))
                return;

        set_bit(IBS_STOPPING, pcpu->state);

        rdmsrl(hwc->config_base, val);
        val &= ~perf_ibs->enable_mask;
        wrmsrl(hwc->config_base, val);
}

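/*
 * Only a single IBS event of each flavor can be scheduled per cpu; the
 * IBS_ENABLED bit guards the per-cpu event slot and add() fails with
 * -ENOSPC if it is already taken.
 */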
static int perf_ibs_add(struct perf_event *event, int flags)
{
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (test_and_set_bit(IBS_ENABLED, pcpu->state))
                return -ENOSPC;

        pcpu->event = event;

        if (flags & PERF_EF_START)
                perf_ibs_start(event, PERF_EF_RELOAD);

        return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
                return;

        perf_ibs_stop(event, 0);

        pcpu->event = NULL;
}

static void perf_ibs_read(struct perf_event *event) { }

static struct perf_ibs perf_ibs_fetch = {
        .pmu = {
                .task_ctx_nr = perf_invalid_context,

                .event_init = perf_ibs_init,
                .add = perf_ibs_add,
                .del = perf_ibs_del,
                .start = perf_ibs_start,
                .stop = perf_ibs_stop,
                .read = perf_ibs_read,
        },
        .msr = MSR_AMD64_IBSFETCHCTL,
        .config_mask = IBS_FETCH_CONFIG_MASK,
        .cnt_mask = IBS_FETCH_MAX_CNT,
        .enable_mask = IBS_FETCH_ENABLE,
        .valid_mask = IBS_FETCH_VAL,
        .offset_mask = { MSR_AMD64_IBSFETCH_REG_MASK },
        .offset_max = MSR_AMD64_IBSFETCH_REG_COUNT,
};

static struct perf_ibs perf_ibs_op = {
        .pmu = {
                .task_ctx_nr = perf_invalid_context,

                .event_init = perf_ibs_init,
                .add = perf_ibs_add,
                .del = perf_ibs_del,
                .start = perf_ibs_start,
                .stop = perf_ibs_stop,
                .read = perf_ibs_read,
        },
        .msr = MSR_AMD64_IBSOPCTL,
        .config_mask = IBS_OP_CONFIG_MASK,
        .cnt_mask = IBS_OP_MAX_CNT,
        .enable_mask = IBS_OP_ENABLE,
        .valid_mask = IBS_OP_VAL,
        .offset_mask = { MSR_AMD64_IBSOP_REG_MASK },
        .offset_max = MSR_AMD64_IBSOP_REG_COUNT,
};

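/*
 * NMI handler: if the event is not started, only a stale interrupt from
 * a just-stopped session is accepted.  Otherwise the control MSR is
 * read and its valid bit checked; for PERF_SAMPLE_RAW the remaining IBS
 * sample registers are collected by walking offset_mask and handed to
 * perf as raw data (ibs_caps followed by the MSR values).  The counter
 * is re-armed by rewriting the config with the enable bit set unless
 * perf_event_overflow() asks to stop.
 */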
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        struct perf_event *event = pcpu->event;
        struct hw_perf_event *hwc = &event->hw;
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct pt_regs regs;
        struct perf_ibs_data ibs_data;
        int offset, size;
        unsigned int msr;
        u64 *buf;

        if (!test_bit(IBS_STARTED, pcpu->state)) {
                /* Catch spurious interrupts after stopping IBS: */
                if (!test_and_clear_bit(IBS_STOPPING, pcpu->state))
                        return 0;
                rdmsrl(perf_ibs->msr, *ibs_data.regs);
                return (*ibs_data.regs & perf_ibs->valid_mask) ? 1 : 0;
        }

        msr = hwc->config_base;
        buf = ibs_data.regs;
        rdmsrl(msr, *buf);
        if (!(*buf++ & perf_ibs->valid_mask))
                return 0;

        perf_sample_data_init(&data, 0);
        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                ibs_data.caps = ibs_caps;
                size = 1;
                offset = 1;
                do {
                        rdmsrl(msr + offset, *buf++);
                        size++;
                        offset = find_next_bit(perf_ibs->offset_mask,
                                               perf_ibs->offset_max,
                                               offset + 1);
                } while (offset < perf_ibs->offset_max);
                raw.size = sizeof(u32) + sizeof(u64) * size;
                raw.data = ibs_data.data;
                data.raw = &raw;
        }

        regs = *iregs; /* XXX: update ip from ibs sample */

        if (perf_event_overflow(event, &data, &regs))
                ; /* stop */
        else
                /* reenable */
                wrmsrl(hwc->config_base, hwc->config | perf_ibs->enable_mask);

        return 1;
}

static int __kprobes
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        int handled = 0;

        handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
        handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        return handled;
}

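/*
 * Allocate the per-cpu state and register one PMU instance.  On
 * registration failure the per-cpu memory is freed again so that a
 * half-initialized PMU is never left behind.
 */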
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
        struct cpu_perf_ibs __percpu *pcpu;
        int ret;

        pcpu = alloc_percpu(struct cpu_perf_ibs);
        if (!pcpu)
                return -ENOMEM;

        perf_ibs->pcpu = pcpu;

        ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
        if (ret) {
                perf_ibs->pcpu = NULL;
                free_percpu(pcpu);
        }

        return ret;
}

static __init int perf_event_ibs_init(void)
{
        if (!ibs_caps)
                return -ENODEV; /* ibs not supported by the cpu */

        perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
        perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
        register_nmi_handler(NMI_LOCAL, &perf_ibs_nmi_handler, 0, "perf_ibs");
        printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

        return 0;
}

#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void) { return 0; }

#endif

/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
        u32 caps;
        unsigned int max_level;

        if (!boot_cpu_has(X86_FEATURE_IBS))
                return 0;

        /* check IBS cpuid feature flags */
        max_level = cpuid_eax(0x80000000);
        if (max_level < IBS_CPUID_FEATURES)
                return IBS_CAPS_DEFAULT;

        caps = cpuid_eax(IBS_CPUID_FEATURES);
        if (!(caps & IBS_CAPS_AVAIL))
                /* cpuid flags not valid */
                return IBS_CAPS_DEFAULT;

        return caps;
}

u32 get_ibs_caps(void)
{
        return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);

static inline int get_eilvt(int offset)
{
        return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
        return !setup_APIC_eilvt(offset, 0, 0, 1);
}

/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
        int offset;
        u64 val;
        int valid = 0;

        preempt_disable();

        rdmsrl(MSR_AMD64_IBSCTL, val);
        offset = val & IBSCTL_LVT_OFFSET_MASK;

        if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
                pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
                       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
                goto out;
        }

        if (!get_eilvt(offset)) {
                pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
                       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
                goto out;
        }

        valid = 1;
out:
        preempt_enable();

        return valid;
}

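/*
 * Propagate the chosen LVT offset to the IBS control register in the
 * northbridge PCI config space (PCI_DEVICE_ID_AMD_10H_NB_MISC) of every
 * node, and verify each write by reading the value back.
 */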
static int setup_ibs_ctl(int ibs_eilvt_off)
{
        struct pci_dev *cpu_cfg;
        int nodes;
        u32 value = 0;

        nodes = 0;
        cpu_cfg = NULL;
        do {
                cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
                                         PCI_DEVICE_ID_AMD_10H_NB_MISC,
                                         cpu_cfg);
                if (!cpu_cfg)
                        break;
                ++nodes;
                pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
                                       | IBSCTL_LVT_OFFSET_VALID);
                pci_read_config_dword(cpu_cfg, IBSCTL, &value);
                if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
                        pci_dev_put(cpu_cfg);
                        printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
                               "IBSCTL = 0x%08x\n", value);
                        return -EINVAL;
                }
        } while (1);

        if (!nodes) {
                printk(KERN_DEBUG "No CPU node configured for IBS\n");
                return -ENODEV;
        }

        return 0;
}

/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This updates then
 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier that
 * is using the new offset.
 */
static int force_ibs_eilvt_setup(void)
{
        int offset;
        int ret;

        preempt_disable();
        /* find the next free available EILVT entry, skip offset 0 */
        for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
                if (get_eilvt(offset))
                        break;
        }
        preempt_enable();

        if (offset == APIC_EILVT_NR_MAX) {
                printk(KERN_DEBUG "No EILVT entry available\n");
                return -EBUSY;
        }

        ret = setup_ibs_ctl(offset);
        if (ret)
                goto out;

        if (!ibs_eilvt_valid()) {
                ret = -EFAULT;
                goto out;
        }

        pr_info("IBS: LVT offset %d assigned\n", offset);

        return 0;
out:
        preempt_disable();
        put_eilvt(offset);
        preempt_enable();
        return ret;
}

static inline int get_ibs_lvt_offset(void)
{
        u64 val;

        rdmsrl(MSR_AMD64_IBSCTL, val);
        if (!(val & IBSCTL_LVT_OFFSET_VALID))
                return -EINVAL;

        return val & IBSCTL_LVT_OFFSET_MASK;
}

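/*
 * Per-cpu local APIC setup: read the LVT offset from MSR_AMD64_IBSCTL
 * and program the corresponding extended LVT entry for NMI delivery.
 * clear_APIC_ibs() masks the entry again when a cpu goes down.
 */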
static void setup_APIC_ibs(void *dummy)
{
        int offset;

        offset = get_ibs_lvt_offset();
        if (offset < 0)
                goto failed;

        if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
                return;
failed:
        pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
                smp_processor_id());
}

static void clear_APIC_ibs(void *dummy)
{
        int offset;

        offset = get_ibs_lvt_offset();
        if (offset >= 0)
                setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}

static int __cpuinit
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:
                setup_APIC_ibs(NULL);
                break;
        case CPU_DYING:
                clear_APIC_ibs(NULL);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static __init int amd_ibs_init(void)
{
        u32 caps;
        int ret = -EINVAL;

        caps = __get_ibs_caps();
        if (!caps)
                return -ENODEV; /* ibs not supported by the cpu */

        /*
         * Force LVT offset assignment for family 10h: The offsets are
         * not assigned by the BIOS for this family, so the OS is
         * responsible for doing it. If the OS assignment fails, fall
         * back to BIOS settings and try to setup this.
         */
        if (boot_cpu_data.x86 == 0x10)
                force_ibs_eilvt_setup();

        if (!ibs_eilvt_valid())
                goto out;

        get_online_cpus();
        ibs_caps = caps;
        /* make ibs_caps visible to other cpus: */
        smp_mb();
        perf_cpu_notifier(perf_ibs_cpu_notifier);
        smp_call_function(setup_APIC_ibs, NULL, 1);
        put_online_cpus();

        ret = perf_event_ibs_init();
out:
        if (ret)
                pr_err("Failed to setup IBS, %d\n", ret);
        return ret;
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);
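/*
 * Usage sketch (not part of the kernel code): once the "ibs_fetch" and
 * "ibs_op" PMUs are registered, user space can open an IBS sampling
 * event through the regular perf_event_open() syscall.  The dynamic PMU
 * type id is assumed to be exported via sysfs as
 * /sys/bus/event_source/devices/ibs_op/type on this kernel; the
 * hypothetical snippet below reads it and opens a cpu-wide event with a
 * sample period of 100000 (a multiple of 16, as required by
 * perf_ibs_init()), requesting the raw IBS register dump assembled in
 * perf_ibs_handle_irq() above.
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int open_ibs_op_event(void)
 *	{
 *		struct perf_event_attr attr;
 *		FILE *f;
 *		int type;
 *
 *		f = fopen("/sys/bus/event_source/devices/ibs_op/type", "r");
 *		if (!f)
 *			return -1;
 *		if (fscanf(f, "%d", &type) != 1) {
 *			fclose(f);
 *			return -1;
 *		}
 *		fclose(f);
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size = sizeof(attr);
 *		attr.type = type;                  // ibs_op PMU
 *		attr.config = 0;                   // period given below
 *		attr.sample_period = 100000;       // multiple of 16
 *		attr.sample_type = PERF_SAMPLE_RAW;
 *
 *		// cpu-wide event on cpu 0; per-task opens are rejected
 *		// because task_ctx_nr is perf_invalid_context.
 *		return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	}
 */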