perfcounters: throttle on too high IRQ rates
arch/x86/kernel/cpu/perf_counter.c
/*
 * Performance counter x86 architecture code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/perf_counter.h>
#include <asm/apic.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_counters_generic __read_mostly;
static u64 perf_counter_mask __read_mostly;
static u64 counter_value_mask __read_mostly;

static int nr_counters_fixed __read_mostly;

struct cpu_hw_counters {
        struct perf_counter     *counters[X86_PMC_IDX_MAX];
        unsigned long           used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        u64                     last_interrupt;
        u64                     global_enable;
        int                     throttled;
};

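/*
 * ->last_interrupt, ->global_enable and ->throttled implement a simple
 * interrupt-rate throttle: see __smp_perf_counter_interrupt() and
 * perf_counter_unthrottle() below.
 */
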
/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

static const int intel_perfmon_event_map[] =
{
        [PERF_COUNT_CPU_CYCLES]                 = 0x003c,
        [PERF_COUNT_INSTRUCTIONS]               = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]           = 0x4f2e,
        [PERF_COUNT_CACHE_MISSES]               = 0x412e,
        [PERF_COUNT_BRANCH_INSTRUCTIONS]        = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]              = 0x00c5,
        [PERF_COUNT_BUS_CYCLES]                 = 0x013c,
};

static const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
                        struct hw_perf_counter *hwc, int idx)
{
        u64 prev_raw_count, new_raw_count, delta;

        /*
         * Careful: an NMI might modify the previous counter value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic counter atomically:
         */
again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        rdmsrl(hwc->counter_base + idx, new_raw_count);

        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (counter-)time and add that to the generic counter.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count, so we do that by clipping the delta to 32 bits:
         */
        delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &hwc->period_left);
}

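/*
 * Example of the 32-bit clipping above: if the previous raw count was
 * 0x00000000fffffff0 and the (wider than 32-bit) hardware counter has
 * since advanced to 0x0000000100000010, then
 *
 *      (s32)new - (s32)prev = 0x10 - (-0x10) = 0x20
 *
 * so 32 events are accumulated into the generic counter, regardless of
 * whether the hardware sign-extended the high bits of the raw value.
 */
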
/*
 * Set up the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
        struct perf_counter_hw_event *hw_event = &counter->hw_event;
        struct hw_perf_counter *hwc = &counter->hw;

        if (unlikely(!perf_counters_initialized))
                return -EINVAL;

        /*
         * Count user events, and generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

        /*
         * If privileged enough, count OS events too, and allow
         * NMI events as well:
         */
        hwc->nmi = 0;
        if (capable(CAP_SYS_ADMIN)) {
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
                if (hw_event->nmi)
                        hwc->nmi = 1;
        }

        hwc->irq_period = hw_event->irq_period;
        /*
         * Intel PMCs cannot be accessed sanely above 32-bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic counter period:
         */
        if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
                hwc->irq_period = 0x7FFFFFFF;

        atomic64_set(&hwc->period_left, hwc->irq_period);

        /*
         * Raw event types provide the config directly in the event structure:
         */
        if (hw_event->raw) {
                hwc->config |= hw_event->type;
        } else {
                if (hw_event->type >= max_intel_perfmon_events)
                        return -EINVAL;
                /*
                 * The generic map:
                 */
                hwc->config |= intel_perfmon_event_map[hw_event->type];
        }
        counter->wakeup_pending = 0;

        return 0;
}

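/*
 * For instance, a non-raw PERF_COUNT_INSTRUCTIONS request ends up with:
 *
 *      hwc->config = 0x00c0 | ARCH_PERFMON_EVENTSEL_USR
 *                           | ARCH_PERFMON_EVENTSEL_INT
 *                           (| ARCH_PERFMON_EVENTSEL_OS if CAP_SYS_ADMIN)
 *
 * i.e. event-select 0xc0, umask 0x00 (instructions retired), with the
 * enable bit still clear - it is only set later in __pmc_generic_enable().
 */
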
u64 hw_perf_save_disable(void)
{
        u64 ctrl;

        if (unlikely(!perf_counters_initialized))
                return 0;

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        return ctrl;
}
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

void hw_perf_restore(u64 ctrl)
{
        if (unlikely(!perf_counters_initialized))
                return;

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}
EXPORT_SYMBOL_GPL(hw_perf_restore);

static inline void
__pmc_fixed_disable(struct perf_counter *counter,
                    struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;
        int err;

        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
__pmc_generic_disable(struct perf_counter *counter,
                      struct hw_perf_counter *hwc, unsigned int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_disable(counter, hwc, idx);
        else
                wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
                             struct hw_perf_counter *hwc, int idx)
{
        s64 left = atomic64_read(&hwc->period_left);
        s32 period = hwc->irq_period;
        int err;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
        }

        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
        }

        per_cpu(prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw counter starts counting from this counter offset,
         * mark it to be able to extract future deltas:
         */
        atomic64_set(&hwc->prev_count, (u64)-left);

        err = checking_wrmsrl(hwc->counter_base + idx,
                              (u64)(-left) & counter_value_mask);
}

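/*
 * Example: with the default irq_period of 0x7FFFFFFF and period_left
 * fully replenished, the counter MSR is programmed with
 * (u64)-0x7FFFFFFF (masked to the counter width), so the hardware
 * counts upwards and overflows - raising the PMI - after exactly
 * 0x7FFFFFFF events. prev_count is set to the same start value so that
 * x86_perf_counter_update() computes the right delta afterwards.
 */
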
static inline void
__pmc_fixed_enable(struct perf_counter *counter,
                   struct hw_perf_counter *hwc, unsigned int __idx)
{
        int idx = __idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;
        int err;

        /*
         * Enable IRQ generation (0x8) and ring-3 counting (0x2),
         * and enable ring-0 counting if allowed:
         */
        bits = 0x8ULL | 0x2ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;
        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

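/*
 * Layout of MSR_ARCH_PERFMON_FIXED_CTR_CTRL as used above: each fixed
 * counter owns a 4-bit field at bit position (idx * 4), where bit 0
 * enables ring-0 counting, bit 1 enables ring-3 counting and bit 3
 * enables PMI generation on overflow - hence the 0xf mask and the
 * read-modify-write of only the counter's own field.
 */
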
static void
__pmc_generic_enable(struct perf_counter *counter,
                     struct hw_perf_counter *hwc, int idx)
{
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
                __pmc_fixed_enable(counter, hwc, idx);
        else
                wrmsr(hwc->config_base + idx,
                      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
}

static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
        unsigned int event;

        if (unlikely(hwc->nmi))
                return -1;

        event = hwc->config & ARCH_PERFMON_EVENT_MASK;

        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_INSTRUCTIONS]))
                return X86_PMC_IDX_FIXED_INSTRUCTIONS;
        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_CPU_CYCLES]))
                return X86_PMC_IDX_FIXED_CPU_CYCLES;
        if (unlikely(event == intel_perfmon_event_map[PERF_COUNT_BUS_CYCLES]))
                return X86_PMC_IDX_FIXED_BUS_CYCLES;

        return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled-in counter:
 */
static int pmc_generic_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx;

        idx = fixed_mode_idx(counter, hwc);
        if (idx >= 0) {
                /*
                 * Try to get the fixed counter, if that is already taken
                 * then try to get a generic counter:
                 */
                if (test_and_set_bit(idx, cpuc->used))
                        goto try_generic;

                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                /*
                 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
                 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
                 */
                hwc->counter_base =
                        MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
                hwc->idx = idx;
        } else {
                idx = hwc->idx;
                /* Try to get the previous generic counter again */
                if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
                        idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
                        if (idx == nr_counters_generic)
                                return -EAGAIN;

                        set_bit(idx, cpuc->used);
                        hwc->idx = idx;
                }
                hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
                hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
        }

        perf_counters_lapic_init(hwc->nmi);

        __pmc_generic_disable(counter, hwc, idx);

        cpuc->counters[idx] = counter;
        /*
         * Make it visible before enabling the hw:
         */
        smp_wmb();

        __hw_perf_counter_set_period(counter, hwc, idx);
        __pmc_generic_enable(counter, hwc, idx);

        return 0;
}

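/*
 * Note on the fixed-counter base set up above: fixed counters use the
 * global indices X86_PMC_IDX_FIXED + n, so with
 *
 *      counter_base = MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED
 *
 * the usual "counter_base + idx" rdmsr/wrmsr pattern addresses
 * MSR_ARCH_PERFMON_FIXED_CTR0 + n, while generic counters keep using
 * MSR_ARCH_PERFMON_PERFCTR0 + idx unchanged.
 */
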
void perf_counter_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        struct cpu_hw_counters *cpuc;
        int cpu, idx;

        if (!nr_counters_generic)
                return;

        local_irq_disable();

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
        rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

        printk(KERN_INFO "\n");
        printk(KERN_INFO "CPU#%d: ctrl: %016llx\n", cpu, ctrl);
        printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status);
        printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow);
        printk(KERN_INFO "CPU#%d: fixed: %016llx\n", cpu, fixed);
        printk(KERN_INFO "CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);

        for (idx = 0; idx < nr_counters_generic; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, pmc_ctrl);
                rdmsrl(MSR_ARCH_PERFMON_PERFCTR0 + idx, pmc_count);

                prev_left = per_cpu(prev_left[idx], cpu);

                printk(KERN_INFO "CPU#%d: gen-PMC%d ctrl: %016llx\n",
                        cpu, idx, pmc_ctrl);
                printk(KERN_INFO "CPU#%d: gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                printk(KERN_INFO "CPU#%d: gen-PMC%d left: %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < nr_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                printk(KERN_INFO "CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_enable();
}

static void pmc_generic_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned int idx = hwc->idx;

        __pmc_generic_disable(counter, hwc, idx);

        clear_bit(idx, cpuc->used);
        cpuc->counters[idx] = NULL;
        /*
         * Make sure the cleared pointer becomes visible before we
         * (potentially) free the counter:
         */
        smp_wmb();

        /*
         * Drain the remaining delta count out of a counter
         * that we are disabling:
         */
        x86_perf_counter_update(counter, hwc, idx);
}

static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
        struct perf_data *irqdata = counter->irqdata;

        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
                irqdata->overrun++;
        } else {
                u64 *p = (u64 *) &irqdata->data[irqdata->len];

                *p = data;
                irqdata->len += sizeof(u64);
        }
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        x86_perf_counter_update(counter, hwc, idx);
        __hw_perf_counter_set_period(counter, hwc, idx);

        if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                __pmc_generic_enable(counter, hwc, idx);
}

static void
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
{
        struct perf_counter *counter, *group_leader = sibling->group_leader;

        /*
         * Store sibling timestamps (if any):
         */
        list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {

                x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
                perf_store_irq_data(sibling, counter->hw_event.type);
                perf_store_irq_data(sibling, atomic64_read(&counter->count));
        }
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
        int bit, cpu = smp_processor_id();
        u64 ack, status, now;
        struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);

        rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);

        /* Disable counters globally */
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
        ack_APIC_irq();

        now = sched_clock();
        if (now - cpuc->last_interrupt < PERFMON_MIN_PERIOD_NS)
                cpuc->throttled = 1;
        cpuc->last_interrupt = now;

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (!status)
                goto out;

again:
        ack = status;
        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                clear_bit(bit, (unsigned long *) &status);
                if (!counter)
                        continue;

                perf_save_and_restart(counter);

                switch (counter->hw_event.record_type) {
                case PERF_RECORD_SIMPLE:
                        continue;
                case PERF_RECORD_IRQ:
                        perf_store_irq_data(counter, instruction_pointer(regs));
                        break;
                case PERF_RECORD_GROUP:
                        perf_handle_group(counter, &status, &ack);
                        break;
                }
                /*
                 * From NMI context we cannot call into the scheduler to
                 * do a task wakeup - but we mark these counters as
                 * wakeup_pending and initiate a wakeup callback:
                 */
                if (nmi) {
                        counter->wakeup_pending = 1;
                        set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
                } else {
                        wake_up(&counter->waitq);
                }
        }

        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);

        /*
         * Repeat if there is more work to be done:
         */
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (status)
                goto again;
out:
        /*
         * Restore - do not re-enable when global enable is off or throttled:
         */
        if (!cpuc->throttled)
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
}

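/*
 * Throttling, as implemented above: a PMI arriving within
 * PERFMON_MIN_PERIOD_NS of the previous one marks the CPU throttled,
 * and the restore step at "out:" then deliberately leaves GLOBAL_CTRL
 * at 0, so no further counter interrupts are raised.
 * perf_counter_unthrottle() below is what eventually writes
 * cpuc->global_enable back and clears the flag (it is expected to be
 * called periodically, e.g. from the timer tick).
 */
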
void perf_counter_unthrottle(void)
{
        struct cpu_hw_counters *cpuc;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        if (unlikely(!perf_counters_initialized))
                return;

        cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
        if (cpuc->throttled) {
                if (printk_ratelimit())
                        printk(KERN_WARNING "PERFMON: max event frequency exceeded!\n");
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
                cpuc->throttled = 0;
        }
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
        irq_enter();
        inc_irq_stat(apic_perf_irqs);
        apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        __smp_perf_counter_interrupt(regs, 0);

        irq_exit();
}

/*
 * Process counter wakeups that were deferred from NMI context:
 */
void perf_counter_notify(struct pt_regs *regs)
{
        struct cpu_hw_counters *cpuc;
        unsigned long flags;
        int bit, cpu;

        local_irq_save(flags);
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
                struct perf_counter *counter = cpuc->counters[bit];

                if (!counter)
                        continue;

                if (counter->wakeup_pending) {
                        counter->wakeup_pending = 0;
                        wake_up(&counter->waitq);
                }
        }

        local_irq_restore(flags);
}

void __cpuinit perf_counters_lapic_init(int nmi)
{
        u32 apic_val;

        if (!perf_counters_initialized)
                return;
        /*
         * Enable the performance counter vector in the APIC LVT:
         */
        apic_val = apic_read(APIC_LVTERR);

        apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
        if (nmi)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        else
                apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        apic_write(APIC_LVTERR, apic_val);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;

        if (likely(cmd != DIE_NMI_IPI))
                return NOTIFY_DONE;

        regs = args->regs;

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        __smp_perf_counter_interrupt(regs, 1);

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
        .notifier_call = perf_counter_nmi_handler
};

void __init init_hw_perf_counters(void)
{
        union cpuid10_eax eax;
        unsigned int ebx;
        unsigned int unused;
        union cpuid10_edx edx;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        /*
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired Event or not.
         */
        cpuid(10, &eax.full, &ebx, &unused, &edx.full);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return;

        printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

        printk(KERN_INFO "... version: %d\n", eax.split.version_id);
        printk(KERN_INFO "... num counters: %d\n", eax.split.num_counters);
        nr_counters_generic = eax.split.num_counters;
        if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
                nr_counters_generic = X86_PMC_MAX_GENERIC;
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
                        nr_counters_generic, X86_PMC_MAX_GENERIC);
        }
        perf_counter_mask = (1 << nr_counters_generic) - 1;
        perf_max_counters = nr_counters_generic;

        printk(KERN_INFO "... bit width: %d\n", eax.split.bit_width);
        counter_value_mask = (1ULL << eax.split.bit_width) - 1;
        printk(KERN_INFO "... value mask: %016Lx\n", counter_value_mask);

        printk(KERN_INFO "... mask length: %d\n", eax.split.mask_length);

        nr_counters_fixed = edx.split.num_counters_fixed;
        if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
                nr_counters_fixed = X86_PMC_MAX_FIXED;
                WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
                        nr_counters_fixed, X86_PMC_MAX_FIXED);
        }
        printk(KERN_INFO "... fixed counters: %d\n", nr_counters_fixed);

        perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;

        printk(KERN_INFO "... counter mask: %016Lx\n", perf_counter_mask);
        perf_counters_initialized = true;

        perf_counters_lapic_init(0);
        register_die_notifier(&perf_counter_nmi_notifier);
}

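/*
 * After init, perf_counter_mask has one bit per usable counter: bits
 * 0 .. nr_counters_generic-1 for the generic PMCs and bits
 * X86_PMC_IDX_FIXED .. X86_PMC_IDX_FIXED + nr_counters_fixed - 1 for
 * the fixed ones - the same bit positions that GLOBAL_STATUS /
 * GLOBAL_OVF_CTRL and cpuc->used operate on.
 */
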
static void pmc_generic_read(struct perf_counter *counter)
{
        x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct hw_perf_counter_ops x86_perf_counter_ops = {
        .enable         = pmc_generic_enable,
        .disable        = pmc_generic_disable,
        .read           = pmc_generic_read,
};

const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
        int err;

        err = __hw_perf_counter_init(counter);
        if (err)
                return NULL;

        return &x86_perf_counter_ops;
}