/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/apic.h>

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>

#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT

enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,

	IBS_MAX_STATES,
};

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;
	u64				(*get_count)(u64 config);
};

struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};

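/*
 * Clamp the period left after a counter overflow to [min, max], report
 * whether a full period has already elapsed and return the count to
 * program into the hardware; essentially a local copy of the
 * x86_perf_event_set_period() logic.
 */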
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *count)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < min))
		left = min;

	if (left > max)
		left = max;

	*count = (u64)left;

	return overflow;
}

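/*
 * Lock-less variant of x86_perf_event_update(): returns 0 without
 * updating the event if the cmpxchg of the previous count loses
 * against a concurrent NMI, in which case the caller retries with a
 * freshly read count.
 */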
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}

static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}

static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (!perf_ibs)
		return -ENOENT;

	config = event->attr.config;
	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (hwc->sample_period & 0x0f)
			/* the lower 4 bits cannot be set in IBS max cnt */
			return -EINVAL;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		/* the hardware max cnt field holds the period >> 4: */
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}

static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int ret;

	/* ignore lower 4 bits in min count: */
	ret = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return ret;
}

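/*
 * IbsFetchCnt (bits 31:16 of IbsFetchCtl) stores the fetch count
 * shifted right by 4, so shifting the masked field by only 12 scales
 * it back to fetch units. IbsOpCurCnt in the upper half of IbsOpCtl
 * holds the full op count and is just moved down to bit 0.
 */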
static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}

static u64 get_ibs_op_count(u64 config)
{
	return (config & IBS_OP_CUR_CNT) >> 32;
}

static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 config)
{
	u64 count = perf_ibs->get_count(config);

	/* the scaled IBS count is 20 bits wide; re-read the MSR on a lost race: */
	while (!perf_event_try_update(event, count, 20)) {
		rdmsrl(event->hw.config_base, config);
		count = perf_ibs->get_count(config);
	}
}

/* Note: The enable mask must be encoded in the config argument. */
static inline void perf_ibs_enable_event(struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config);
}

/*
 * We cannot restore the IBS PMU state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags in
 * perf_ibs_start()/perf_ibs_stop() and instead always do both.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &config);
	/* the max cnt field expects the period shifted right by 4: */
	config = (config >> 4) | perf_ibs->enable_mask;
	set_bit(IBS_STARTED, pcpu->state);
	perf_ibs_enable_event(hwc, config);

	perf_event_update_userpage(event);
}

static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 val;
	int stopping;

	stopping = test_and_clear_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, val);

	if (stopping) {
		set_bit(IBS_STOPPING, pcpu->state);
		val &= ~perf_ibs->enable_mask;
		wrmsrl(hwc->config_base, val);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	perf_ibs_event_update(perf_ibs, event, val);
	hwc->state |= PERF_HES_UPTODATE;
}

static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}

static void perf_ibs_read(struct perf_event *event) { }

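/*
 * IBS fetch and op sampling are exposed as two separate PMUs; they
 * share the code above and differ only in the MSR layout described by
 * the masks and register offsets below.
 */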
static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,

	.get_count		= get_ibs_fetch_count,
};

static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,

	.get_count		= get_ibs_op_count,
};

static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, overflow, reenable;
	unsigned int msr;
	u64 *buf, config;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
		/* Catch spurious interrupts after stopping IBS: */
		if (!test_and_clear_bit(IBS_STOPPING, pcpu->state))
			return 0;
		rdmsrl(perf_ibs->msr, *ibs_data.regs);
		return (*ibs_data.regs & perf_ibs->valid_mask) ? 1 : 0;
	}

	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		return 0;

	/*
	 * Emulate IbsOpCurCnt in MSRC001_1033 (IbsOpCtl), which is not
	 * supported by all CPUs. Since this event triggered an
	 * interrupt, set the current count to the max count: the max
	 * cnt field holds the period >> 4, so shifting it by 36 places
	 * the full count in the CurCnt bit position.
	 */
	config = ibs_data.regs[0];
	if (perf_ibs == &perf_ibs_op && !(ibs_caps & IBS_CAPS_RDWROPCNT)) {
		config &= ~IBS_OP_CUR_CNT;
		config |= (config & IBS_OP_MAX_CNT) << 36;
	}

	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		ibs_data.caps = ibs_caps;
		size = 1;
		offset = 1;
		/* read the remaining IBS registers given by offset_mask: */
		do {
			rdmsrl(msr + offset, *buf++);
			size++;
			offset = find_next_bit(perf_ibs->offset_mask,
					       perf_ibs->offset_max,
					       offset + 1);
		} while (offset < perf_ibs->offset_max);
		raw.size = sizeof(u32) + sizeof(u64) * size;
		raw.data = ibs_data.data;
		data.raw = &raw;
	}

	regs = *iregs; /* XXX: update ip from ibs sample */

	overflow = perf_ibs_set_period(perf_ibs, hwc, &config);
	reenable = !(overflow && perf_event_overflow(event, &data, &regs));
	config = (config >> 4) | (reenable ? perf_ibs->enable_mask : 0);
	perf_ibs_enable_event(hwc, config);

	perf_event_update_userpage(event);

	return 1;
}

static int __kprobes
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

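/*
 * Allocate the per-cpu IBS state and register the PMU with perf; free
 * the per-cpu memory again if registration fails.
 */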
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}

static __init int perf_event_ibs_init(void)
{
	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}

#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void) { return 0; }

#endif

/* IBS - apic initialization, for perf and oprofile */

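/*
 * Query the IBS CPUID feature leaf. If the leaf is missing or its
 * valid bit is clear, fall back to the default capability set implied
 * by X86_FEATURE_IBS.
 */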
static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}

u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);

/*
 * setup_APIC_eilvt() returns 0 on success; reserve the extended LVT
 * entry with a masked NMI setup, release it again by programming a
 * masked, all-zero entry.
 */
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}

/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}

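/*
 * Program the IBSCTL register in the northbridge of every node with
 * the chosen LVT offset and read the value back to verify that the
 * write took effect.
 */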
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x\n", value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * set up the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset, which updates the
 * offset in the per-node IBS_CTL MSR. The per-core APIC setup of the
 * IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
 * then uses the new offset.
 */
static int force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		printk(KERN_DEBUG "No EILVT entry available\n");
		return -EBUSY;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid()) {
		ret = -EFAULT;
		goto out;
	}

	pr_info("IBS: LVT offset %d assigned\n", offset);

	return 0;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return ret;
}

static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}

static void setup_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}

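/*
 * CPU hotplug: set up the IBS LVT entry when a CPU starts and clear
 * it again before the CPU dies.
 */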
static int __cpuinit
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		setup_APIC_ibs(NULL);
		break;
	case CPU_DYING:
		clear_APIC_ibs(NULL);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

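/*
 * Driver init: validate (or, on family 10h, force) the IBS LVT
 * offset, publish ibs_caps, program the APIC on all online CPUs and
 * finally register the PMUs and the NMI handler.
 */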
static __init int amd_ibs_init(void)
{
	u32 caps;
	int ret = -EINVAL;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to the BIOS settings and try to use those.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		goto out;

	get_online_cpus();
	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	perf_cpu_notifier(perf_ibs_cpu_notifier);
	smp_call_function(setup_APIC_ibs, NULL, 1);
	put_online_cpus();

	ret = perf_event_ibs_init();
out:
	if (ret)
		pr_err("Failed to setup IBS, %d\n", ret);
	return ret;
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);