Commit | Line | Data |
---|---|---|
241771ef IM |
1 | /* |
2 | * Performance counter x86 architecture code | |
3 | * | |
4 | * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de> | |
5 | * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar | |
6 | * | |
7 | * For licensing details see kernel-base/COPYING | |
8 | */ | |
9 | ||
10 | #include <linux/perf_counter.h> | |
11 | #include <linux/capability.h> | |
12 | #include <linux/notifier.h> | |
13 | #include <linux/hardirq.h> | |
14 | #include <linux/kprobes.h> | |
4ac13294 | 15 | #include <linux/module.h> |
241771ef IM |
16 | #include <linux/kdebug.h> |
17 | #include <linux/sched.h> | |
18 | ||
19 | #include <asm/intel_arch_perfmon.h> | |
20 | #include <asm/apic.h> | |
21 | ||
22 | static bool perf_counters_initialized __read_mostly; | |
23 | ||
24 | /* | |
25 | * Number of (generic) HW counters: | |
26 | */ | |
27 | static int nr_hw_counters __read_mostly; | |
28 | static u32 perf_counter_mask __read_mostly; | |
29 | ||
30 | /* No support for fixed function counters yet */ | |
31 | ||
32 | #define MAX_HW_COUNTERS 8 | |
33 | ||
34 | struct cpu_hw_counters { | |
35 | struct perf_counter *counters[MAX_HW_COUNTERS]; | |
36 | unsigned long used[BITS_TO_LONGS(MAX_HW_COUNTERS)]; | |
241771ef IM |
37 | }; |
38 | ||
39 | /* | |
40 | * Intel PerfMon v3. Used on Core2 and later. | |
41 | */ | |
42 | static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); | |
43 | ||
44 | const int intel_perfmon_event_map[] = | |
45 | { | |
46 | [PERF_COUNT_CYCLES] = 0x003c, | |
47 | [PERF_COUNT_INSTRUCTIONS] = 0x00c0, | |
48 | [PERF_COUNT_CACHE_REFERENCES] = 0x4f2e, | |
49 | [PERF_COUNT_CACHE_MISSES] = 0x412e, | |
50 | [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4, | |
51 | [PERF_COUNT_BRANCH_MISSES] = 0x00c5, | |
52 | }; | |
53 | ||
54 | const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map); | |
55 | ||
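As an aside, each entry in the map above is already in the EVENTSEL encoding that later gets OR-ed into hwc->config: assuming the standard architectural layout (event select in bits 0-7, unit mask in bits 8-15), a value such as 0x412e decomposes into event 0x2e with unit mask 0x41. A minimal sketch of that decomposition:

```c
/*
 * Sketch: decomposing one intel_perfmon_event_map[] entry, assuming the
 * architectural EVENTSEL layout (event select bits 0-7, unit mask 8-15).
 */
#include <stdio.h>

int main(void)
{
	unsigned int cfg = 0x412e;	/* PERF_COUNT_CACHE_MISSES above */

	printf("event select: 0x%02x\n", cfg & 0xff);		/* 0x2e */
	printf("unit mask:    0x%02x\n", (cfg >> 8) & 0xff);	/* 0x41 */
	return 0;
}
```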
56 | /* | |
57 | * Set up the hardware configuration for a given hw_event_type | |
58 | */ | |
621a01ea | 59 | static int __hw_perf_counter_init(struct perf_counter *counter) |
241771ef | 60 | { |
9f66a381 | 61 | struct perf_counter_hw_event *hw_event = &counter->hw_event; |
241771ef IM |
62 | struct hw_perf_counter *hwc = &counter->hw; |
63 | ||
64 | if (unlikely(!perf_counters_initialized)) | |
65 | return -EINVAL; | |
66 | ||
67 | /* | |
68 | * Count user events, and generate PMC IRQs: | |
69 | * (keep 'enabled' bit clear for now) | |
70 | */ | |
71 | hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT; | |
72 | ||
73 | /* | |
74 | * If privileged enough, count OS events too, and allow | |
75 | * NMI events as well: | |
76 | */ | |
77 | hwc->nmi = 0; | |
78 | if (capable(CAP_SYS_ADMIN)) { | |
79 | hwc->config |= ARCH_PERFMON_EVENTSEL_OS; | |
9f66a381 | 80 | if (hw_event->nmi) |
241771ef IM |
81 | hwc->nmi = 1; |
82 | } | |
83 | ||
9f66a381 IM |
84 | hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0; |
85 | hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0; | |
241771ef | 86 | |
9f66a381 | 87 | hwc->irq_period = hw_event->irq_period; |
241771ef IM |
88 | /* |
89 | * Intel PMCs cannot be accessed sanely above 32 bit width, | |
90 | * so we install an artificial 1<<31 period regardless of | |
91 | * the generic counter period: | |
92 | */ | |
93 | if (!hwc->irq_period) | |
94 | hwc->irq_period = 0x7FFFFFFF; | |
95 | ||
9f66a381 | 96 | hwc->next_count = -(s32)hwc->irq_period; |
241771ef IM |
97 | |
98 | /* | |
dfa7c899 | 99 | * Raw event types provide the config in the event structure |
241771ef | 100 | */ |
9f66a381 IM |
101 | if (hw_event->raw) { |
102 | hwc->config |= hw_event->type; | |
241771ef | 103 | } else { |
9f66a381 | 104 | if (hw_event->type >= max_intel_perfmon_events) |
241771ef IM |
105 | return -EINVAL; |
106 | /* | |
107 | * The generic map: | |
108 | */ | |
9f66a381 | 109 | hwc->config |= intel_perfmon_event_map[hw_event->type]; |
241771ef | 110 | } |
241771ef IM |
111 | counter->wakeup_pending = 0; |
112 | ||
113 | return 0; | |
114 | } | |
115 | ||
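To make the resulting configuration concrete, here is a small sketch of the config word the init path above builds for a non-raw PERF_COUNT_INSTRUCTIONS event. The flag values are the usual architectural EVENTSEL bit positions and are assumptions here, not definitions taken from this file:

```c
/*
 * Hypothetical sketch of the config word __hw_perf_counter_init() builds
 * for a non-raw PERF_COUNT_INSTRUCTIONS event. The bit positions below
 * are the usual architectural EVENTSEL flags, assumed rather than taken
 * from this file.
 */
#define EVENTSEL_USR	(1UL << 16)	/* count user-mode events */
#define EVENTSEL_OS	(1UL << 17)	/* count kernel-mode events */
#define EVENTSEL_INT	(1UL << 20)	/* raise an interrupt on overflow */

unsigned long example_instructions_config(int privileged)
{
	unsigned long config = EVENTSEL_USR | EVENTSEL_INT;

	if (privileged)			/* CAP_SYS_ADMIN in the code above */
		config |= EVENTSEL_OS;

	/* intel_perfmon_event_map[PERF_COUNT_INSTRUCTIONS] == 0x00c0 */
	return config | 0x00c0;
}
```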
241771ef IM |
116 | void hw_perf_enable_all(void) |
117 | { | |
43874d23 | 118 | wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0); |
241771ef IM |
119 | } |
120 | ||
01b2838c | 121 | void hw_perf_restore(u64 ctrl) |
241771ef | 122 | { |
4ac13294 TG |
123 | wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0); |
124 | } | |
01b2838c | 125 | EXPORT_SYMBOL_GPL(hw_perf_restore); |
4ac13294 | 126 | |
01b2838c | 127 | u64 hw_perf_save_disable(void) |
4ac13294 TG |
128 | { |
129 | u64 ctrl; | |
130 | ||
131 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); | |
241771ef | 132 | wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); |
4ac13294 | 133 | return ctrl; |
241771ef | 134 | } |
01b2838c | 135 | EXPORT_SYMBOL_GPL(hw_perf_save_disable); |
241771ef | 136 | |
7e2ae347 | 137 | static inline void |
621a01ea | 138 | __x86_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx) |
7e2ae347 IM |
139 | { |
140 | wrmsr(hwc->config_base + idx, hwc->config, 0); | |
141 | } | |
142 | ||
241771ef IM |
143 | static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]); |
144 | ||
7e2ae347 | 145 | static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx) |
241771ef IM |
146 | { |
147 | per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count; | |
148 | ||
149 | wrmsr(hwc->counter_base + idx, hwc->next_count, 0); | |
7e2ae347 IM |
150 | } |
151 | ||
621a01ea | 152 | static void __x86_perf_counter_enable(struct hw_perf_counter *hwc, int idx) |
7e2ae347 IM |
153 | { |
154 | wrmsr(hwc->config_base + idx, | |
155 | hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0); | |
241771ef IM |
156 | } |
157 | ||
621a01ea | 158 | static void x86_perf_counter_enable(struct perf_counter *counter) |
241771ef IM |
159 | { |
160 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | |
161 | struct hw_perf_counter *hwc = &counter->hw; | |
162 | int idx = hwc->idx; | |
163 | ||
164 | /* Try to get the previous counter again */ | |
165 | if (test_and_set_bit(idx, cpuc->used)) { | |
166 | idx = find_first_zero_bit(cpuc->used, nr_hw_counters); | |
167 | set_bit(idx, cpuc->used); | |
168 | hwc->idx = idx; | |
169 | } | |
170 | ||
171 | perf_counters_lapic_init(hwc->nmi); | |
172 | ||
621a01ea | 173 | __x86_perf_counter_disable(hwc, idx); |
241771ef IM |
174 | |
175 | cpuc->counters[idx] = counter; | |
7e2ae347 IM |
176 | |
177 | __hw_perf_counter_set_period(hwc, idx); | |
621a01ea | 178 | __x86_perf_counter_enable(hwc, idx); |
241771ef IM |
179 | } |
180 | ||
241771ef IM |
181 | static void __hw_perf_save_counter(struct perf_counter *counter, |
182 | struct hw_perf_counter *hwc, int idx) | |
183 | { | |
184 | s64 raw = -1; | |
185 | s64 delta; | |
241771ef IM |
186 | |
187 | /* | |
188 | * Get the raw hw counter value: | |
189 | */ | |
1e125676 | 190 | rdmsrl(hwc->counter_base + idx, raw); |
241771ef IM |
191 | |
192 | /* | |
193 | * Rebase it to zero (it started counting at -irq_period), | |
194 | * to see the delta since ->prev_count: | |
195 | */ | |
196 | delta = (s64)hwc->irq_period + (s64)(s32)raw; | |
197 | ||
198 | atomic64_counter_set(counter, hwc->prev_count + delta); | |
199 | ||
200 | /* | |
201 | * Adjust the ->prev_count offset - if we went beyond | |
202 | * irq_period units, then we got an IRQ and the counter | |
203 | * was set back to -irq_period: | |
204 | */ | |
205 | while (delta >= (s64)hwc->irq_period) { | |
206 | hwc->prev_count += hwc->irq_period; | |
207 | delta -= (s64)hwc->irq_period; | |
208 | } | |
209 | ||
210 | /* | |
211 | * Calculate the next raw counter value we'll write into | |
212 | * the counter at the next sched-in time: | |
213 | */ | |
214 | delta -= (s64)hwc->irq_period; | |
215 | ||
216 | hwc->next_count = (s32)delta; | |
217 | } | |
218 | ||
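The rebasing arithmetic above is easier to see with concrete numbers: the counter is started at -irq_period and counts upward, so the sign-extended 32-bit value read back equals -irq_period plus the number of events counted so far, and adding irq_period recovers that number. A small worked example with a made-up period:

```c
/* Worked example of the delta arithmetic above, with a made-up period. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t irq_period = 1000;
	/* counter started at -irq_period and has counted 250 events: */
	int32_t raw = (int32_t)(-irq_period + 250);	/* == -750 */
	int64_t delta = irq_period + (int64_t)raw;	/* == 250 */

	printf("delta = %lld\n", (long long)delta);
	return 0;
}
```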
219 | void perf_counter_print_debug(void) | |
220 | { | |
221 | u64 ctrl, status, overflow, pmc_ctrl, pmc_count, next_count; | |
1e125676 IM |
222 | int cpu, idx; |
223 | ||
224 | if (!nr_hw_counters) | |
225 | return; | |
241771ef IM |
226 | |
227 | local_irq_disable(); | |
228 | ||
229 | cpu = smp_processor_id(); | |
230 | ||
1e125676 IM |
231 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); |
232 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); | |
233 | rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); | |
241771ef IM |
234 | |
235 | printk(KERN_INFO "\n"); | |
236 | printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl); | |
237 | printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status); | |
238 | printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow); | |
239 | ||
240 | for (idx = 0; idx < nr_hw_counters; idx++) { | |
1e125676 IM |
241 | rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); |
242 | rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count); | |
241771ef IM |
243 | |
244 | next_count = per_cpu(prev_next_count[idx], cpu); | |
245 | ||
246 | printk(KERN_INFO "CPU#%d: PMC%d ctrl: %016llx\n", | |
247 | cpu, idx, pmc_ctrl); | |
248 | printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n", | |
249 | cpu, idx, pmc_count); | |
250 | printk(KERN_INFO "CPU#%d: PMC%d next: %016llx\n", | |
251 | cpu, idx, next_count); | |
252 | } | |
253 | local_irq_enable(); | |
254 | } | |
255 | ||
621a01ea | 256 | static void x86_perf_counter_disable(struct perf_counter *counter) |
241771ef IM |
257 | { |
258 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | |
259 | struct hw_perf_counter *hwc = &counter->hw; | |
260 | unsigned int idx = hwc->idx; | |
261 | ||
621a01ea | 262 | __x86_perf_counter_disable(hwc, idx); |
241771ef IM |
263 | |
264 | clear_bit(idx, cpuc->used); | |
265 | cpuc->counters[idx] = NULL; | |
266 | __hw_perf_save_counter(counter, hwc, idx); | |
267 | } | |
268 | ||
621a01ea | 269 | static void x86_perf_counter_read(struct perf_counter *counter) |
241771ef IM |
270 | { |
271 | struct hw_perf_counter *hwc = &counter->hw; | |
272 | unsigned long addr = hwc->counter_base + hwc->idx; | |
273 | s64 offs, val = -1LL; | |
274 | s32 val32; | |
241771ef IM |
275 | |
276 | /* Careful: NMI might modify the counter offset */ | |
277 | do { | |
278 | offs = hwc->prev_count; | |
1e125676 | 279 | rdmsrl(addr, val); |
241771ef IM |
280 | } while (offs != hwc->prev_count); |
281 | ||
282 | val32 = (s32) val; | |
5c92d124 | 283 | val = (s64)hwc->irq_period + (s64)val32; |
241771ef IM |
284 | atomic64_counter_set(counter, hwc->prev_count + val); |
285 | } | |
286 | ||
287 | static void perf_store_irq_data(struct perf_counter *counter, u64 data) | |
288 | { | |
289 | struct perf_data *irqdata = counter->irqdata; | |
290 | ||
291 | if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) { | |
292 | irqdata->overrun++; | |
293 | } else { | |
294 | u64 *p = (u64 *) &irqdata->data[irqdata->len]; | |
295 | ||
296 | *p = data; | |
297 | irqdata->len += sizeof(u64); | |
298 | } | |
299 | } | |
300 | ||
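For PERF_RECORD_GROUP events, perf_handle_group() below feeds this buffer with pairs of u64 values: the sibling's hw_event.type followed by its current count. That pair layout is only implied by the store order, so the following reader sketch is an assumption rather than a documented format:

```c
/*
 * Hypothetical reader for the buffer perf_handle_group() fills: pairs of
 * u64 values, (hw_event.type, counter value). The pair layout is inferred
 * from the store order, not from a documented format.
 */
#include <stdint.h>
#include <stdio.h>

static void walk_group_data(const uint64_t *buf, size_t len_bytes)
{
	size_t i, n = len_bytes / sizeof(uint64_t);

	for (i = 0; i + 1 < n; i += 2)
		printf("type %llu: count %llu\n",
		       (unsigned long long)buf[i],
		       (unsigned long long)buf[i + 1]);
}
```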
7e2ae347 IM |
301 | /* |
302 | * NMI-safe enable method: | |
303 | */ | |
241771ef IM |
304 | static void perf_save_and_restart(struct perf_counter *counter) |
305 | { | |
306 | struct hw_perf_counter *hwc = &counter->hw; | |
307 | int idx = hwc->idx; | |
7e2ae347 | 308 | u64 pmc_ctrl; |
241771ef | 309 | |
1e125676 | 310 | rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl); |
241771ef | 311 | |
7e2ae347 IM |
312 | __hw_perf_save_counter(counter, hwc, idx); |
313 | __hw_perf_counter_set_period(hwc, idx); | |
314 | ||
315 | if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE) | |
621a01ea | 316 | __x86_perf_counter_enable(hwc, idx); |
241771ef IM |
317 | } |
318 | ||
319 | static void | |
04289bb9 | 320 | perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown) |
241771ef | 321 | { |
04289bb9 | 322 | struct perf_counter *counter, *group_leader = sibling->group_leader; |
241771ef IM |
323 | int bit; |
324 | ||
04289bb9 IM |
325 | /* |
326 | * Store the counter's own timestamp first: | |
327 | */ | |
328 | perf_store_irq_data(sibling, sibling->hw_event.type); | |
329 | perf_store_irq_data(sibling, atomic64_counter_read(sibling)); | |
241771ef | 330 | |
04289bb9 IM |
331 | /* |
332 | * Then store sibling timestamps (if any): | |
333 | */ | |
334 | list_for_each_entry(counter, &group_leader->sibling_list, list_entry) { | |
6a930700 | 335 | if (counter->state != PERF_COUNTER_STATE_ACTIVE) { |
241771ef IM |
336 | /* |
337 | * If the counter was not in the overflow mask we have to | |
338 | * read it from hardware. We also read it when its bit is | |
339 | * still set in the status mask (it has not been handled | |
340 | * yet), and clear that bit. | |
341 | */ | |
342 | bit = counter->hw.idx; | |
343 | if (!test_bit(bit, (unsigned long *) overflown) || | |
344 | test_bit(bit, (unsigned long *) status)) { | |
345 | clear_bit(bit, (unsigned long *) status); | |
346 | perf_save_and_restart(counter); | |
347 | } | |
348 | } | |
04289bb9 IM |
349 | perf_store_irq_data(sibling, counter->hw_event.type); |
350 | perf_store_irq_data(sibling, atomic64_counter_read(counter)); | |
241771ef IM |
351 | } |
352 | } | |
353 | ||
354 | /* | |
355 | * This handler is triggered by the local APIC, so the APIC IRQ handling | |
356 | * rules apply: | |
357 | */ | |
358 | static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi) | |
359 | { | |
360 | int bit, cpu = smp_processor_id(); | |
43874d23 | 361 | u64 ack, status, saved_global; |
241771ef | 362 | struct cpu_hw_counters *cpuc; |
43874d23 IM |
363 | |
364 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global); | |
241771ef | 365 | |
241771ef IM |
366 | /* Disable counters globally */ |
367 | wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0); | |
368 | ack_APIC_irq(); | |
369 | ||
370 | cpuc = &per_cpu(cpu_hw_counters, cpu); | |
371 | ||
87b9cf46 IM |
372 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); |
373 | if (!status) | |
374 | goto out; | |
375 | ||
241771ef IM |
376 | again: |
377 | ack = status; | |
378 | for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) { | |
379 | struct perf_counter *counter = cpuc->counters[bit]; | |
380 | ||
381 | clear_bit(bit, (unsigned long *) &status); | |
382 | if (!counter) | |
383 | continue; | |
384 | ||
385 | perf_save_and_restart(counter); | |
386 | ||
9f66a381 | 387 | switch (counter->hw_event.record_type) { |
241771ef IM |
388 | case PERF_RECORD_SIMPLE: |
389 | continue; | |
390 | case PERF_RECORD_IRQ: | |
391 | perf_store_irq_data(counter, instruction_pointer(regs)); | |
392 | break; | |
393 | case PERF_RECORD_GROUP: | |
241771ef IM |
394 | perf_handle_group(counter, &status, &ack); |
395 | break; | |
396 | } | |
397 | /* | |
398 | * From NMI context we cannot call into the scheduler to | |
399 | * do a task wakeup - but we mark these counters as | |
400 | * wakeup_pending and initiate a wakeup callback: | |
401 | */ | |
402 | if (nmi) { | |
403 | counter->wakeup_pending = 1; | |
404 | set_tsk_thread_flag(current, TIF_PERF_COUNTERS); | |
405 | } else { | |
406 | wake_up(&counter->waitq); | |
407 | } | |
408 | } | |
409 | ||
410 | wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0); | |
411 | ||
412 | /* | |
413 | * Repeat if there is more work to be done: | |
414 | */ | |
415 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); | |
416 | if (status) | |
417 | goto again; | |
87b9cf46 | 418 | out: |
241771ef | 419 | /* |
43874d23 | 420 | * Restore - do not reenable when global enable is off: |
241771ef | 421 | */ |
43874d23 | 422 | wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, saved_global, 0); |
241771ef IM |
423 | } |
424 | ||
425 | void smp_perf_counter_interrupt(struct pt_regs *regs) | |
426 | { | |
427 | irq_enter(); | |
92bf73e9 | 428 | inc_irq_stat(apic_perf_irqs); |
241771ef IM |
429 | apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); |
430 | __smp_perf_counter_interrupt(regs, 0); | |
431 | ||
432 | irq_exit(); | |
433 | } | |
434 | ||
435 | /* | |
436 | * This handler is triggered by NMI contexts: | |
437 | */ | |
438 | void perf_counter_notify(struct pt_regs *regs) | |
439 | { | |
440 | struct cpu_hw_counters *cpuc; | |
441 | unsigned long flags; | |
442 | int bit, cpu; | |
443 | ||
444 | local_irq_save(flags); | |
445 | cpu = smp_processor_id(); | |
446 | cpuc = &per_cpu(cpu_hw_counters, cpu); | |
447 | ||
448 | for_each_bit(bit, cpuc->used, nr_hw_counters) { | |
449 | struct perf_counter *counter = cpuc->counters[bit]; | |
450 | ||
451 | if (!counter) | |
452 | continue; | |
453 | ||
454 | if (counter->wakeup_pending) { | |
455 | counter->wakeup_pending = 0; | |
456 | wake_up(&counter->waitq); | |
457 | } | |
458 | } | |
459 | ||
460 | local_irq_restore(flags); | |
461 | } | |
462 | ||
463 | void __cpuinit perf_counters_lapic_init(int nmi) | |
464 | { | |
465 | u32 apic_val; | |
466 | ||
467 | if (!perf_counters_initialized) | |
468 | return; | |
469 | /* | |
470 | * Enable the performance counter vector in the APIC LVT: | |
471 | */ | |
472 | apic_val = apic_read(APIC_LVTERR); | |
473 | ||
474 | apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED); | |
475 | if (nmi) | |
476 | apic_write(APIC_LVTPC, APIC_DM_NMI); | |
477 | else | |
478 | apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); | |
479 | apic_write(APIC_LVTERR, apic_val); | |
480 | } | |
481 | ||
482 | static int __kprobes | |
483 | perf_counter_nmi_handler(struct notifier_block *self, | |
484 | unsigned long cmd, void *__args) | |
485 | { | |
486 | struct die_args *args = __args; | |
487 | struct pt_regs *regs; | |
488 | ||
489 | if (likely(cmd != DIE_NMI_IPI)) | |
490 | return NOTIFY_DONE; | |
491 | ||
492 | regs = args->regs; | |
493 | ||
494 | apic_write(APIC_LVTPC, APIC_DM_NMI); | |
495 | __smp_perf_counter_interrupt(regs, 1); | |
496 | ||
497 | return NOTIFY_STOP; | |
498 | } | |
499 | ||
500 | static __read_mostly struct notifier_block perf_counter_nmi_notifier = { | |
501 | .notifier_call = perf_counter_nmi_handler | |
502 | }; | |
503 | ||
504 | void __init init_hw_perf_counters(void) | |
505 | { | |
506 | union cpuid10_eax eax; | |
507 | unsigned int unused; | |
508 | unsigned int ebx; | |
509 | ||
510 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | |
511 | return; | |
512 | ||
513 | /* | |
514 | * Check whether the Architectural PerfMon supports | |
515 | * Branch Misses Retired Event or not. | |
516 | */ | |
517 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | |
518 | if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) | |
519 | return; | |
520 | ||
521 | printk(KERN_INFO "Intel Performance Monitoring support detected.\n"); | |
522 | ||
523 | printk(KERN_INFO "... version: %d\n", eax.split.version_id); | |
524 | printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters); | |
525 | nr_hw_counters = eax.split.num_counters; | |
526 | if (nr_hw_counters > MAX_HW_COUNTERS) { | |
527 | nr_hw_counters = MAX_HW_COUNTERS; | |
528 | WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", | |
529 | nr_hw_counters, MAX_HW_COUNTERS); | |
530 | } | |
531 | perf_counter_mask = (1 << nr_hw_counters) - 1; | |
532 | perf_max_counters = nr_hw_counters; | |
533 | ||
534 | printk(KERN_INFO "... bit_width: %d\n", eax.split.bit_width); | |
535 | printk(KERN_INFO "... mask_length: %d\n", eax.split.mask_length); | |
536 | ||
537 | perf_counters_lapic_init(0); | |
538 | register_die_notifier(&perf_counter_nmi_notifier); | |
539 | ||
540 | perf_counters_initialized = true; | |
541 | } | |
621a01ea | 542 | |
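For reference, init_hw_perf_counters() above decodes CPUID leaf 0xA via union cpuid10_eax. A sketch of the EAX field layout it relies on (the bit positions are the standard architectural-perfmon split and are stated here as an assumption, not taken from this file):

```c
/*
 * Sketch of the CPUID leaf 0xA EAX layout that union cpuid10_eax mirrors.
 * Bit positions are the standard architectural-perfmon split, assumed
 * here rather than taken from this file.
 */
#include <stdio.h>

static void decode_cpuid10_eax(unsigned int eax)
{
	printf("version_id:   %u\n", eax & 0xff);		/* bits  0-7  */
	printf("num_counters: %u\n", (eax >> 8) & 0xff);	/* bits  8-15 */
	printf("bit_width:    %u\n", (eax >> 16) & 0xff);	/* bits 16-23 */
	printf("mask_length:  %u\n", (eax >> 24) & 0xff);	/* bits 24-31 */
}

int main(void)
{
	/* made-up value: version 2, 4 counters, 48 bits wide, 7 event IDs */
	decode_cpuid10_eax(0x07300402);
	return 0;
}
```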
5c92d124 | 543 | static const struct hw_perf_counter_ops x86_perf_counter_ops = { |
621a01ea IM |
544 | .hw_perf_counter_enable = x86_perf_counter_enable, |
545 | .hw_perf_counter_disable = x86_perf_counter_disable, | |
546 | .hw_perf_counter_read = x86_perf_counter_read, | |
547 | }; | |
548 | ||
5c92d124 IM |
549 | const struct hw_perf_counter_ops * |
550 | hw_perf_counter_init(struct perf_counter *counter) | |
621a01ea IM |
551 | { |
552 | int err; | |
553 | ||
554 | err = __hw_perf_counter_init(counter); | |
555 | if (err) | |
556 | return NULL; | |
557 | ||
558 | return &x86_perf_counter_ops; | |
559 | } |
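The ops table is the only interface the generic perf_counter core needs from this backend. A hedged sketch (an assumption, not code from this file) of how a caller is expected to drive a counter through it:

```c
/*
 * Illustration of how the generic layer is expected to drive a counter
 * through the ops table returned by hw_perf_counter_init() above.
 */
static void example_sched_in_out(struct perf_counter *counter)
{
	const struct hw_perf_counter_ops *ops = &x86_perf_counter_ops;

	ops->hw_perf_counter_enable(counter);	/* claim a PMC and start counting */
	/* ... task runs on this CPU ... */
	ops->hw_perf_counter_read(counter);	/* fold hw state into the 64-bit count */
	ops->hw_perf_counter_disable(counter);	/* release the PMC and save the count */
}
```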