perf stat: Print out instructions/cycle metric
arch/x86/kernel/cpu/perf_counter.c
/*
 * Performance counter x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_counter_mask __read_mostly;

struct cpu_hw_counters {
	struct perf_counter *counters[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long interrupts;
	int enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char *name;
	int version;
	int (*handle_irq)(struct pt_regs *);
	void (*disable_all)(void);
	void (*enable_all)(void);
	void (*enable)(struct hw_perf_counter *, int);
	void (*disable)(struct hw_perf_counter *, int);
	unsigned eventsel;
	unsigned perfctr;
	u64 (*event_map)(int);
	u64 (*raw_event)(u64);
	int max_events;
	int num_counters;
	int num_counters_fixed;
	int counter_bits;
	u64 counter_mask;
	u64 max_period;
	u64 intel_ctrl;
};
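
/*
 * One shared PMU descriptor instance: intel_pmu_init() and amd_pmu_init()
 * below copy the matching vendor template (intel_pmu / amd_pmu) into it
 * at boot, so the rest of this file only ever references x86_pmu.
 */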
static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x003c,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
	[PERF_COUNT_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_BUS_CYCLES]			= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

/*
 * Generalized hw caching related event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'event makes no sense on
 * this CPU', any other value means the raw event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

static const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0480, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L2  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0xc024, /* L2_RQSTS.PREFETCHES */
		[ C(RESULT_MISS)   ] = 0x8024, /* L2_RQSTS.PREFETCH_MISS */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	/* To be filled in */
};

static const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	/* To be filled in */
};

static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_EDGE_MASK  |	\
	 CORE_EVNTSEL_INV_MASK   |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}
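
/*
 * Example: the generic CACHE_MISSES event above (0x412e) is event select
 * 0x2e with unit mask 0x41 under these masks. Bits outside
 * CORE_EVNTSEL_MASK (the USR/OS, INT and ENABLE control bits) are
 * stripped from user-supplied raw configs and set by the kernel itself.
 */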

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x0076,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
	[PERF_COUNT_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - x86_pmu.counter_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
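
/*
 * Example of the shift trick above: with 48-bit counters, shift is 16.
 * Shifting both raw MSR values into the top bits and doing the signed
 * subtract/shift sign-extends them, so the delta comes out right even
 * though rdmsrl() does not sign-extend above the counter width.
 */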

static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}
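
/*
 * The cache event config is packed as three bytes, matching the way the
 * hw_cache_event_ids table is indexed: bits 0-7 select the cache (type),
 * bits 8-15 the operation and bits 16-23 the result.
 */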

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_attr *attr = &counter->attr;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&active_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period)
		hwc->sample_period = x86_pmu.max_period;

	atomic64_set(&hwc->period_left, hwc->sample_period);
	counter->destroy = hw_perf_counter_destroy;

	/*
	 * Raw event type provides the config in the event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;
	/*
	 * The generic map:
	 */
	hwc->config |= x86_pmu.event_map(attr->config);

	return 0;
}

static void intel_pmu_disable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	if (!x86_pmu_initialized())
		return;
	return x86_pmu.disable_all();
}

static void intel_pmu_enable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_enable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.enable_all();
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_counter(hwc, idx);
}

static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static int
x86_perf_counter_set_period(struct perf_counter *counter,
			    struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			      (u64)(-left) & x86_pmu.counter_mask);

	return ret;
}
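
/*
 * Note that the counter gets programmed with -left, so it counts upwards
 * and overflows (raising the PMI) after exactly 'left' more events;
 * prev_count is set to the same value so x86_perf_counter_update()
 * later computes the correct delta.
 */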

static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_counter(hwc, idx);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->enabled)
		x86_pmu_enable_counter(hwc, idx);
	else
		x86_pmu_disable_counter(hwc, idx);
}

static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (!x86_pmu.num_counters_fixed)
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}
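
/*
 * Only the three architectural events checked above (instructions,
 * core cycles, bus cycles) can be placed in the Intel fixed-purpose
 * counters; everything else must go to a generic PMC.
 */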

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used_mask))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	perf_counters_lapic_init();

	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->counters[hwc->idx] != counter))
		return;

	x86_pmu.enable(hwc, hwc->idx);
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static int intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_counter_update(counter, hwc, idx);
	ret = x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		intel_pmu_enable_counter(hwc, idx);

	return ret;
}

static void intel_pmu_reset(void)
{
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}

	local_irq_restore(flags);
}


/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_counters *cpuc;
	int bit, cpu, loops;
	u64 ack, status;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	perf_disable();
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
		perf_counter_print_debug();
		intel_pmu_reset();
		perf_enable();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(counter))
			continue;

		if (perf_counter_overflow(counter, 1, regs, 0))
			intel_pmu_disable_counter(&counter->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	perf_enable();

	return 1;
}

static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	int cpu, idx, handled = 0;
	struct cpu_hw_counters *cpuc;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	u64 val;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		counter = cpuc->counters[idx];
		hwc = &counter->hw;

		val = x86_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			continue;

		/* counter overflow */
		handled = 1;
		inc_irq_stat(apic_perf_irqs);
		if (!x86_perf_counter_set_period(counter, hwc, idx))
			continue;

		if (perf_counter_overflow(counter, 1, regs, 0))
			amd_pmu_disable_counter(hwc, idx);
	}

	return handled;
}
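
/*
 * AMD has no global overflow-status register, so overflow is inferred
 * from the counter value itself: counters are programmed to -left, which
 * keeps the top counter bit set until the count wraps past zero.
 */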

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(void)
{
	if (!x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_counters))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call = perf_counter_nmi_handler,
	.next = NULL,
	.priority = 1
};

static struct x86_pmu intel_pmu = {
	.name = "Intel",
	.handle_irq = intel_pmu_handle_irq,
	.disable_all = intel_pmu_disable_all,
	.enable_all = intel_pmu_enable_all,
	.enable = intel_pmu_enable_counter,
	.disable = intel_pmu_disable_counter,
	.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
	.event_map = intel_pmu_event_map,
	.raw_event = intel_pmu_raw_event,
	.max_events = ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	.max_period = (1ULL << 31) - 1,
};

static struct x86_pmu amd_pmu = {
	.name = "AMD",
	.handle_irq = amd_pmu_handle_irq,
	.disable_all = amd_pmu_disable_all,
	.enable_all = amd_pmu_enable_all,
	.enable = amd_pmu_enable_counter,
	.disable = amd_pmu_disable_counter,
	.eventsel = MSR_K7_EVNTSEL0,
	.perfctr = MSR_K7_PERFCTR0,
	.event_map = amd_pmu_event_map,
	.raw_event = amd_pmu_raw_event,
	.max_events = ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters = 4,
	.counter_bits = 48,
	.counter_mask = (1ULL << 48) - 1,
	/* use highest bit to detect overflow */
	.max_period = (1ULL << 47) - 1,
};

static int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return -ENODEV;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		return -ENODEV;

	x86_pmu = intel_pmu;
	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose counters, so
	 * assume at least 3 counters:
	 */
	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	x86_pmu.counter_bits = eax.split.bit_width;
	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	/*
	 * Install the model-specific hw-cache event tables:
	 */
	switch (boot_cpu_data.x86_model) {
	case 17:
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
		       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);

		pr_info("... installed Core2 event tables\n");
		break;
	default:
	case 26:
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
		       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);

		pr_info("... installed Nehalem/Corei7 event tables\n");
		break;
	case 28:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(u64)*PERF_COUNT_HW_CACHE_MAX*
		       PERF_COUNT_HW_CACHE_OP_MAX*PERF_COUNT_HW_CACHE_RESULT_MAX);

		pr_info("... installed Atom event tables\n");
		break;
	}
	return 0;
}

static int amd_pmu_init(void)
{
	x86_pmu = amd_pmu;
	return 0;
}

void __init init_hw_perf_counters(void)
{
	int err;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0)
		return;

	pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
	pr_info("... version: %d\n", x86_pmu.version);
	pr_info("... bit width: %d\n", x86_pmu.counter_bits);

	pr_info("... num counters: %d\n", x86_pmu.num_counters);
	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
	perf_max_counters = x86_pmu.num_counters;

	pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
	pr_info("... max period: %016Lx\n", x86_pmu.max_period);

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
	}
	pr_info("... fixed counters: %d\n", x86_pmu.num_counters_fixed);

	perf_counter_mask |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	pr_info("... counter mask: %016Lx\n", perf_counter_mask);

	perf_counters_lapic_init();
	register_die_notifier(&perf_counter_nmi_notifier);
}

static inline void x86_pmu_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct pmu pmu = {
	.enable = x86_pmu_enable,
	.disable = x86_pmu_disable,
	.read = x86_pmu_read,
	.unthrottle = x86_pmu_unthrottle,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);


static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning = backtrace_warning,
	.warning_symbol = backtrace_warning_symbol,
	.stack = backtrace_stack,
	.address = backtrace_address,
};

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}


struct stack_frame {
	const void __user *next_fp;
	unsigned long return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}
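
/*
 * User-space callchains are collected by walking the saved frame-pointer
 * chain with the helper above, so they are only meaningful for user code
 * built with frame pointers.
 */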

static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	entry->nr = 0;
	entry->hv = 0;
	entry->kernel = 0;
	entry->user = 0;

	perf_do_callchain(regs, entry);

	return entry;
}