/*
 * Performance counter x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
static u64 perf_counter_mask __read_mostly;
struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
};
/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_counter *, int);
	void		(*disable)(struct hw_perf_counter *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		counter_bits;
	u64		counter_mask;
	u64		max_period;
	u64		intel_ctrl;
};
static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};
/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
  [PERF_COUNT_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_BUS_CYCLES]		= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}
/*
 * Generalized hw caching related event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'event makes no sense on
 * this CPU', any other value means the raw event ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
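/*
 * Example of how the table is consumed (see set_ext_hw_attr() below): a
 * hw-cache config that selects L1D/OP_READ/RESULT_MISS is looked up as
 * hw_cache_event_ids[cache_type][cache_op][cache_result]; on Nehalem that
 * slot resolves to raw event 0x0140 (L1D_CACHE_LD.I_STATE).
 */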
static const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =

		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */

		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */

		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,

		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */

		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0xc024, /* L2_RQSTS.PREFETCHES          */
		[ C(RESULT_MISS)   ] = 0x8024, /* L2_RQSTS.PREFETCH_MISS       */

		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias)    */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */

		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias)    */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,

		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
static const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =

		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */

		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS        */
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                    */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                   */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                   */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE                 */

		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                   */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE                 */

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias)    */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD          */

		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias)    */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST          */

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P           */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                   */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY          */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED      */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
static const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =

		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD                 */
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST                 */
		[ C(RESULT_MISS)   ] = 0,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                   */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE                 */

		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                   */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE                 */

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias)    */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD          */

		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias)    */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST          */

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P           */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                  */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY          */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED      */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_EDGE_MASK  |	\
	 CORE_EVNTSEL_INV_MASK   |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}
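/*
 * Only the event-select, unit-mask, edge, invert and counter-mask fields of
 * a user-supplied raw config survive this filter. For example, a raw value
 * of 0x004300c0 (event 0xc0 with the USR/OS and ENABLE bits already set) is
 * reduced to 0x000000c0; the privilege and enable bits are managed by
 * __hw_perf_counter_init() and the enable/disable paths instead.
 */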
static const u64 amd_0f_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =

		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches   */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses    */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,

		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches         */
		[ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses    */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.       */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI     */

		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,

	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}
static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}
/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - x86_pmu.counter_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
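/*
 * Worked example of the shift trick above: with 48-bit counters, shift is
 * 16. If the raw count moved from 0xffffffffffff to 0x000000000005 (i.e. it
 * wrapped), then
 *	delta = ((0x5 << 16) - (0xffffffffffff << 16)) >> 16 = 6,
 * because shifting both values into the upper 48 bits lets ordinary 64-bit
 * two's-complement arithmetic absorb both the wrap and the missing hardware
 * sign-extension.
 */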
static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);
static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}
static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}
static inline int
set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];
/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_attr *attr = &counter->attr;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&active_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period)
		hwc->sample_period = x86_pmu.max_period;

	atomic64_set(&hwc->period_left, hwc->sample_period);
	counter->destroy = hw_perf_counter_destroy;

	/*
	 * Raw event type provides the config in the event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	hwc->config |= x86_pmu.event_map(attr->config);

	return 0;
}
static void intel_pmu_disable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}
static void amd_pmu_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}
void hw_perf_disable(void)
{
	if (!x86_pmu_initialized())
		return;
	return x86_pmu.disable_all();
}
static void intel_pmu_enable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}
static void amd_pmu_enable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}
void hw_perf_enable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.enable_all();
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}
static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;

	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}
static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;

	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config);
}
static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_counter(hwc, idx);
}
static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}
static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static int
x86_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			     (u64)(-left) & x86_pmu.counter_mask);

	return ret;
}
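/*
 * Example: with a sample_period of 100000 on a 48-bit counter, the MSR is
 * programmed to (u64)-100000 & counter_mask = 0xfffffffe7960, so the
 * counter overflows (and raises the PMI) after exactly 100000 increments.
 */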
static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
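/*
 * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
 * For fixed counter 1 (idx == 1), mask is 0xf0 and a kernel+user setup with
 * the PMI enabled writes 0xb into that field (0x8 | 0x2 | 0x1, shifted by
 * 4), leaving the other fixed counters' fields untouched.
 */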
static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_counter(hwc, idx);
}
static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->enabled)
		x86_pmu_enable_counter(hwc, idx);
	else
		x86_pmu_disable_counter(hwc, idx);
}
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (!x86_pmu.num_counters_fixed)
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}
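/*
 * In short: plain "instructions", "cpu-cycles" and "bus-cycles" counters are
 * steered onto the Intel fixed-purpose PMCs when possible, keeping the
 * generic counters free; every other event falls back to the generic
 * allocation path in x86_pmu_enable() below.
 */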
/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used_mask))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base  = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	perf_counters_lapic_init();

	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

	return 0;
}
static void x86_pmu_unthrottle(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->counters[hwc->idx] != counter))
		return;

	x86_pmu.enable(hwc, hwc->idx);
}
void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}
static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);
}
/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static int intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_counter_update(counter, hwc, idx);
	ret = x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		intel_pmu_enable_counter(hwc, idx);

	return ret;
}
static void intel_pmu_reset(void)
{
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	int bit, cpu, loops;
	u64 ack, status;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	status = intel_pmu_get_status();

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
		perf_counter_print_debug();
		intel_pmu_reset();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(counter))
			continue;

		if (perf_counter_overflow(counter, 1, regs, 0))
			intel_pmu_disable_counter(&counter->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	return 1;
}
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	int cpu, idx, handled = 0;
	struct cpu_hw_counters *cpuc;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	u64 val;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		counter = cpuc->counters[idx];
		hwc = &counter->hw;

		val = x86_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			continue;

		/* counter overflow */
		handled = 1;
		inc_irq_stat(apic_perf_irqs);
		if (!x86_perf_counter_set_period(counter, hwc, idx))
			continue;

		if (perf_counter_overflow(counter, 1, regs, 0))
			amd_pmu_disable_counter(hwc, idx);
	}

	return handled;
}
void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
}
void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}
void perf_counters_lapic_init(void)
{
	if (!x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_counters))
		return NOTIFY_DONE;

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler,
};
static struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_counter,
	.disable		= intel_pmu_disable_counter,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	.max_period		= (1ULL << 31) - 1,
};
static struct x86_pmu amd_pmu = {
	.handle_irq		= amd_pmu_handle_irq,
	.disable_all		= amd_pmu_disable_all,
	.enable_all		= amd_pmu_enable_all,
	.enable			= amd_pmu_enable_counter,
	.disable		= amd_pmu_disable_counter,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.raw_event		= amd_pmu_raw_event,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.counter_mask		= (1ULL << 48) - 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
};
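/*
 * AMD counters are 48 bits wide and there is no global overflow-status MSR,
 * so amd_pmu_handle_irq() treats a cleared bit 47 as "overflowed": capping
 * max_period at 2^47 - 1 guarantees that a freshly programmed counter
 * (written as -period) starts with bit 47 set and only loses it once it has
 * wrapped past zero.
 */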
static int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return -ENODEV;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;

	x86_pmu				= intel_pmu;
	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.counter_bits		= eax.split.bit_width;
	x86_pmu.counter_mask		= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose counters, so
	 * assume at least 3 counters:
	 */
	x86_pmu.num_counters_fixed	= max((int)edx.split.num_counters_fixed, 3);

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		pr_cont("Core2 events, ");

		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		pr_cont("Nehalem/Corei7 events, ");

		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		pr_cont("Atom events, ");
static int amd_pmu_init(void)
{
	switch (boot_cpu_data.x86) {
		memcpy(hw_cache_event_ids, amd_0f_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		pr_cont("AMD Family 0f/10/11 events, ");
void __init init_hw_perf_counters(void)
{
	int err;

	pr_info("Performance Counters: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software counters only.\n");
		return;
	}

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
	perf_max_counters = x86_pmu.num_counters;

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
	}

	perf_counter_mask |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	perf_counters_lapic_init();
	register_die_notifier(&perf_counter_nmi_notifier);

	pr_info("... version: %d\n", x86_pmu.version);
	pr_info("... bit width: %d\n", x86_pmu.counter_bits);
	pr_info("... generic counters: %d\n", x86_pmu.num_counters);
	pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed);
	pr_info("... counter mask: %016Lx\n", perf_counter_mask);
}
static inline void x86_pmu_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}
static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};
const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &pmu;
}
static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
};
static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}
struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp   = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp	     = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}
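/*
 * In other words, the user-space part of the callchain is produced by
 * manually chasing saved frame pointers: starting from the bp captured at
 * sample time, each iteration copies one struct stack_frame from user memory
 * (with page faults disabled, see copy_stack_frame() above) and records its
 * return address, until the walk leaves the valid stack area or
 * MAX_STACK_DEPTH entries have been stored.
 */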
static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	perf_do_callchain(regs, entry);

	return entry;
}