/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>

#include <asm/apic.h>

#include "../perf_event.h"

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>

#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT

/*
 * IBS states:
 *
 * ENABLED; tracks the pmu::add(), pmu::del() state, when set the counter is taken
 * and any further add()s must fail.
 *
 * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
 * complicated by the fact that the IBS hardware can send late NMIs (ie. after
 * we've cleared the EN bit).
 *
 * In order to consume these late NMIs we have the STOPPED state, any NMI that
 * happens after we've cleared the EN state will clear this bit and report the
 * NMI handled (this is fundamentally racy in the face of multiple NMI sources,
 * someone else can consume our bit and our NMI will go unhandled).
 *
 * And since we cannot set/clear this separate bit together with the EN bit,
 * there are races; if we cleared STARTED early, an NMI could land in
 * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
 * could happen if the period is small enough), consume our STOPPED bit
 * and trigger streams of unhandled NMIs.
 *
 * If, however, we clear STARTED late, an NMI can hit between clearing the
 * EN bit and clearing STARTED, still see STARTED set and process the event.
 * If this event has the VALID bit clear, we bail properly, but this
 * is not a given. With VALID set we can end up calling pmu::stop() again
 * (the throttle logic) and trigger the WARNs in there.
 *
 * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
 * nesting, and clear STARTED late, so that we have a well defined state over
 * the clearing of the EN bit.
 *
 * XXX: we could probably be using !atomic bitops for all this.
 */
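
/*
 * Illustrative summary of the resulting state machine (a sketch derived
 * from the handlers below, not part of the original file):
 *
 *	pmu::add()	-> set_bit(IBS_ENABLED)
 *	pmu::start()	-> set_bit(IBS_STARTED), clear_bit(IBS_STOPPING),
 *			   write the EN bit
 *	NMI		-> sample if IBS_STARTED is set, otherwise consume
 *			   IBS_STOPPED and report the NMI as handled
 *	pmu::stop()	-> set_bit(IBS_STOPPING), set_bit(IBS_STOPPED),
 *			   clear the EN bit, then clear_bit(IBS_STARTED)
 *	pmu::del()	-> clear_bit(IBS_ENABLED)
 */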

enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,
	IBS_STOPPED	= 3,

	IBS_MAX_STATES,
};

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;

	struct attribute		**format_attrs;
	struct attribute_group		format_group;
	const struct attribute_group	*attr_groups[2];

	u64				(*get_count)(u64 config);
};

struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};

static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler. This biases the results.
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
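
/*
 * Worked example (illustrative, not part of the original file): with
 * min = 0x10 and sample_period = 0x20, a period_left of -0x30 skips
 * forward to 0x20 and a period_left of 0x8 is topped up to 0x28; both
 * count as a software overflow. The result is clamped against max
 * before being written to *hw_period.
 */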

static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
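
/*
 * Example of the shift trick above (illustrative, not part of the
 * original file): for a hypothetical 48-bit counter, shift = 16.
 * Shifting both raw counts left by 16 aligns the counter's top bit
 * with bit 63, so the subtraction and the shift back down yield a
 * correct delta even if the hardware did not sign-extend above the
 * physical counter width.
 */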

static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}

/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */

static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
	switch (event->attr.precise_ip) {
	case 0:
		return -ENOENT;
	case 1:
	case 2:
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		switch (event->attr.config) {
		case PERF_COUNT_HW_CPU_CYCLES:
			*config = 0;
			return 0;
		}
		break;
	case PERF_TYPE_RAW:
		switch (event->attr.config) {
		case 0x0076:
			*config = 0;
			return 0;
		case 0x00C1:
			*config = IBS_OP_CNT_CTL;
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	return -EOPNOTSUPP;
}

static const struct perf_event_attr ibs_notsupp = {
	.exclude_user	= 1,
	.exclude_kernel	= 1,
	.exclude_hv	= 1,
	.exclude_idle	= 1,
	.exclude_host	= 1,
	.exclude_guest	= 1,
};

static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (perf_ibs) {
		config = event->attr.config;
	} else {
		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);
		if (ret)
			return ret;
	}

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
		return -EINVAL;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * The lower 4 bits can not be set in the ibs max
			 * cnt, but we allow it in case we later adjust the
			 * sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
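
/*
 * Example of the rounding above (illustrative, not part of the
 * original file): in frequency mode a computed sample_period of
 * 0x1234 is silently rounded down to 0x1230, since the hardware max
 * count field stores the period in units of 16; in fixed-period mode
 * a period with any of the lower 4 bits set is rejected instead.
 */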

static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}

static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}

static u64 get_ibs_op_count(u64 config)
{
	u64 count = 0;

	if (config & IBS_OP_VAL)
		count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */

	if (ibs_caps & IBS_CAPS_RDWROPCNT)
		count += (config & IBS_OP_CUR_CNT) >> 32;

	return count;
}
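
/*
 * Worked example (illustrative, not part of the original file): with
 * IBS_OP_VAL set and a max count field of 0x1000, the rolled-over
 * period contributes 0x1000 << 4 = 0x10000 ops; on hardware with
 * IBS_CAPS_RDWROPCNT the current count read back from IbsOpCtl is
 * added on top of that.
 */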

static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 *config)
{
	u64 count = perf_ibs->get_count(*config);

	/*
	 * Set width to 64 since we do not overflow on max width but
	 * instead on max count. In perf_ibs_set_period() we clear
	 * prev count manually on overflow.
	 */
	while (!perf_event_try_update(event, count, 64)) {
		rdmsrl(event->hw.config_base, *config);
		count = perf_ibs->get_count(*config);
	}
}

static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
					 struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
}

/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
					  struct hw_perf_event *hwc, u64 config)
{
	config &= ~perf_ibs->cnt_mask;
	wrmsrl(hwc->config_base, config);
	config &= ~perf_ibs->enable_mask;
	wrmsrl(hwc->config_base, config);
}

/*
 * We cannot restore the IBS pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags
 * in perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &period);
	/*
	 * Set STARTED before enabling the hardware, such that a subsequent NMI
	 * must observe it.
	 */
	set_bit(IBS_STARTED,    pcpu->state);
	clear_bit(IBS_STOPPING, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);
}

static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;
	int stopping;

	if (test_and_set_bit(IBS_STOPPING, pcpu->state))
		return;

	stopping = test_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, config);

	if (stopping) {
		/*
		 * Set STOPPED before disabling the hardware, such that it
		 * must be visible to NMIs the moment we clear the EN bit,
		 * at which point we can generate an !VALID sample which
		 * we need to consume.
		 */
		set_bit(IBS_STOPPED, pcpu->state);
		perf_ibs_disable_event(perf_ibs, hwc, config);
		/*
		 * Clear STARTED after disabling the hardware; if it were
		 * cleared before, an NMI hitting after the clear but before
		 * clearing the EN bit might think it a spurious NMI and not
		 * handle it.
		 *
		 * Clearing it after, however, creates the problem of the NMI
		 * handler seeing STARTED but not having a valid sample.
		 */
		clear_bit(IBS_STARTED, pcpu->state);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * Clear the valid bit to not count rollovers on update; rollovers
	 * are only updated in the irq handler.
	 */
	config &= ~perf_ibs->valid_mask;

	perf_ibs_event_update(perf_ibs, event, &config);
	hwc->state |= PERF_HES_UPTODATE;
}

static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}

static void perf_ibs_read(struct perf_event *event) { }

PMU_FORMAT_ATTR(rand_en,	"config:57");
PMU_FORMAT_ATTR(cnt_ctl,	"config:19");

static struct attribute *ibs_fetch_format_attrs[] = {
	&format_attr_rand_en.attr,
	NULL,
};

static struct attribute *ibs_op_format_attrs[] = {
	NULL,	/* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
	NULL,
};
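
/*
 * These format attributes end up in sysfs, e.g. (illustrative path,
 * assuming the PMU registers as "ibs_fetch" below):
 *
 *	/sys/bus/event_source/devices/ibs_fetch/format/rand_en -> "config:57"
 *
 * which lets tools specify e.g. perf record -e ibs_fetch/rand_en=1/.
 */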

static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
	.format_attrs		= ibs_fetch_format_attrs,

	.get_count		= get_ibs_fetch_count,
};

static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,
	.format_attrs		= ibs_op_format_attrs,

	.get_count		= get_ibs_op_count,
};

static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, *config, period;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
		/*
		 * Catch spurious interrupts after stopping IBS: After
		 * disabling IBS there could still be incoming NMIs
		 * with samples that even have the valid bit cleared.
		 * Mark all these NMIs as handled.
		 */
		if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
			return 1;

		return 0;
	}

	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		goto fail;

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
	else if (check_rip)
		offset_max = 2;
	else
		offset_max = 1;
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		/*
		 * Read IbsBrTarget and IbsOpData4 separately
		 * depending on their availability.
		 * Can't add to offset_max as they are staggered
		 */
		if (ibs_caps & IBS_CAPS_BRNTRGT) {
			rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
			size++;
		}
		if (ibs_caps & IBS_CAPS_OPDATA4) {
			rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
			size++;
		}
	}
	ibs_data.size = sizeof(u64) * size;

	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.size = sizeof(u32) + ibs_data.size;
		raw.data = ibs_data.data;
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle)
		perf_ibs_stop(event, 0);
	else
		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);

	return 1;
}

static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 stamp = sched_clock();
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	perf_sample_event_took(sched_clock() - stamp);

	return handled;
}
NOKPROBE_SYMBOL(perf_ibs_nmi_handler);

static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	/* register attributes */
	if (perf_ibs->format_attrs[0]) {
		memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
		perf_ibs->format_group.name	= "format";
		perf_ibs->format_group.attrs	= perf_ibs->format_attrs;

		memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
		perf_ibs->attr_groups[0]	= &perf_ibs->format_group;
		perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
	}

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}

static __init int perf_event_ibs_init(void)
{
	struct attribute **attr = ibs_op_format_attrs;

	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT) {
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
		*attr++ = &format_attr_cnt_ctl.attr;
	}
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}

#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void) { return 0; }

#endif

/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}

u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);

static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
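
/*
 * Note: setup_APIC_eilvt() returns 0 on success, hence the negation
 * above. get_eilvt() reserves an extended-interrupt LVT entry for NMI
 * delivery (masked); put_eilvt() releases it by programming the entry
 * back to zero.
 */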

/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}

static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
				 value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		pr_debug("No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
 * uses the new offset.
 */
static void force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		pr_debug("No EILVT entry available\n");
		return;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid())
		goto out;

	pr_info("IBS: LVT offset %d assigned\n", offset);

	return;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return;
}

static void ibs_eilvt_setup(void)
{
	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to the BIOS settings and try to set those up instead.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();
}

static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}

static void setup_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}

#ifdef CONFIG_PM

static int perf_ibs_suspend(void)
{
	clear_APIC_ibs(NULL);
	return 0;
}

static void perf_ibs_resume(void)
{
	ibs_eilvt_setup();
	setup_APIC_ibs(NULL);
}

static struct syscore_ops perf_ibs_syscore_ops = {
	.resume		= perf_ibs_resume,
	.suspend	= perf_ibs_suspend,
};

static void perf_ibs_pm_init(void)
{
	register_syscore_ops(&perf_ibs_syscore_ops);
}

#else

static inline void perf_ibs_pm_init(void) { }

#endif

static int
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		setup_APIC_ibs(NULL);
		break;
	case CPU_DYING:
		clear_APIC_ibs(NULL);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static __init int amd_ibs_init(void)
{
	u32 caps;
	int ret = -EINVAL;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		goto out;

	perf_ibs_pm_init();
	cpu_notifier_register_begin();
	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	smp_call_function(setup_APIC_ibs, NULL, 1);
	__perf_cpu_notifier(perf_ibs_cpu_notifier);
	cpu_notifier_register_done();

	ret = perf_event_ibs_init();
out:
	if (ret)
		pr_err("Failed to setup IBS, %d\n", ret);
	return ret;
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);
);