ARM: 6070/1: perf-events: add support for xscale PMUs
arch/arm/kernel/perf_event.c
1#undef DEBUG
2
3/*
4 * ARM performance counter support.
5 *
6 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
7 *
8 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
9 * 2010 (c) MontaVista Software, LLC.
10 *
11 * This code is based on the sparc64 perf event code, which is in turn based
12 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
13 * code.
14 */
15#define pr_fmt(fmt) "hw perfevents: " fmt
16
17#include <linux/interrupt.h>
18#include <linux/kernel.h>
 19#include <linux/module.h>
 20#include <linux/perf_event.h>
 21#include <linux/platform_device.h>
22#include <linux/spinlock.h>
23#include <linux/uaccess.h>
24
25#include <asm/cputype.h>
26#include <asm/irq.h>
27#include <asm/irq_regs.h>
28#include <asm/pmu.h>
29#include <asm/stacktrace.h>
30
 31static struct platform_device *pmu_device;
32
33/*
34 * Hardware lock to serialize accesses to PMU registers. Needed for the
35 * read/modify/write sequences.
36 */
37DEFINE_SPINLOCK(pmu_lock);
38
39/*
40 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
41 * another platform that supports more, we need to increase this to be the
42 * largest of all platforms.
43 *
44 * ARMv7 supports up to 32 events:
45 * cycle counter CCNT + 31 events counters CNT0..30.
46 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 47 */
 48#define ARMPMU_MAX_HWEVENTS 33
49
50/* The events for a given CPU. */
51struct cpu_hw_events {
52 /*
53 * The events that are active on the CPU for the given index. Index 0
54 * is reserved.
55 */
56 struct perf_event *events[ARMPMU_MAX_HWEVENTS];
57
58 /*
59 * A 1 bit for an index indicates that the counter is being used for
60 * an event. A 0 means that the counter can be used.
61 */
62 unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
63
64 /*
65 * A 1 bit for an index indicates that the counter is actively being
66 * used.
67 */
68 unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
69};
70DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
71
72/* PMU names. */
73static const char *arm_pmu_names[] = {
74 [ARM_PERF_PMU_ID_XSCALE1] = "xscale1",
75 [ARM_PERF_PMU_ID_XSCALE2] = "xscale2",
76 [ARM_PERF_PMU_ID_V6] = "v6",
77 [ARM_PERF_PMU_ID_V6MP] = "v6mpcore",
78 [ARM_PERF_PMU_ID_CA8] = "ARMv7 Cortex-A8",
79 [ARM_PERF_PMU_ID_CA9] = "ARMv7 Cortex-A9",
80};
81
 82struct arm_pmu {
 83	enum arm_perf_pmu_ids id;
84 irqreturn_t (*handle_irq)(int irq_num, void *dev);
85 void (*enable)(struct hw_perf_event *evt, int idx);
86 void (*disable)(struct hw_perf_event *evt, int idx);
87 int (*event_map)(int evt);
88 u64 (*raw_event)(u64);
89 int (*get_event_idx)(struct cpu_hw_events *cpuc,
90 struct hw_perf_event *hwc);
91 u32 (*read_counter)(int idx);
92 void (*write_counter)(int idx, u32 val);
93 void (*start)(void);
94 void (*stop)(void);
95 int num_events;
96 u64 max_period;
97};
98
99/* Set at runtime when we know what CPU type we are. */
100static const struct arm_pmu *armpmu;
101
102enum arm_perf_pmu_ids
103armpmu_get_pmu_id(void)
104{
105 int id = -ENODEV;
106
107 if (armpmu != NULL)
108 id = armpmu->id;
109
110 return id;
111}
112EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
113
114#define HW_OP_UNSUPPORTED 0xFFFF
115
116#define C(_x) \
117 PERF_COUNT_HW_CACHE_##_x
118
119#define CACHE_OP_UNSUPPORTED 0xFFFF
120
121static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
122 [PERF_COUNT_HW_CACHE_OP_MAX]
123 [PERF_COUNT_HW_CACHE_RESULT_MAX];
124
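/*
 * For reference, the decode below follows the generic perf cache-event
 * packing of attr.config: the cache type lives in bits [7:0], the operation
 * in bits [15:8] and the result in bits [23:16], i.e. roughly
 *
 *	config = (cache_result << 16) | (cache_op << 8) | cache_type;
 *
 * so an L1D read miss would be
 * (C(RESULT_MISS) << 16) | (C(OP_READ) << 8) | C(L1D).
 */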
125static int
126armpmu_map_cache_event(u64 config)
127{
128 unsigned int cache_type, cache_op, cache_result, ret;
129
130 cache_type = (config >> 0) & 0xff;
131 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
132 return -EINVAL;
133
134 cache_op = (config >> 8) & 0xff;
135 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
136 return -EINVAL;
137
138 cache_result = (config >> 16) & 0xff;
139 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
140 return -EINVAL;
141
142 ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];
143
144 if (ret == CACHE_OP_UNSUPPORTED)
145 return -ENOENT;
146
147 return ret;
148}
149
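/*
 * Program the counter so that it overflows after 'left' more events: the
 * hardware counter is loaded with (u32)-left and counts up, so the overflow
 * interrupt fires once the remaining period has elapsed. 'left' is clamped
 * to the PMU's max_period since the counters are only 32 bits wide.
 */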
150static int
151armpmu_event_set_period(struct perf_event *event,
152 struct hw_perf_event *hwc,
153 int idx)
154{
155 s64 left = atomic64_read(&hwc->period_left);
156 s64 period = hwc->sample_period;
157 int ret = 0;
158
159 if (unlikely(left <= -period)) {
160 left = period;
161 atomic64_set(&hwc->period_left, left);
162 hwc->last_period = period;
163 ret = 1;
164 }
165
166 if (unlikely(left <= 0)) {
167 left += period;
168 atomic64_set(&hwc->period_left, left);
169 hwc->last_period = period;
170 ret = 1;
171 }
172
173 if (left > (s64)armpmu->max_period)
174 left = armpmu->max_period;
175
176 atomic64_set(&hwc->prev_count, (u64)-left);
177
178 armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
179
180 perf_event_update_userpage(event);
181
182 return ret;
183}
184
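/*
 * Fold the delta since the last read into the event count. The cmpxchg loop
 * guards against a racing update of prev_count (e.g. from the overflow
 * interrupt handler), and the shift by (64 - 32) folds the subtraction back
 * to the 32-bit width of the hardware counter so that a counter wrap between
 * reads still yields the correct delta.
 */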
185static u64
186armpmu_event_update(struct perf_event *event,
187 struct hw_perf_event *hwc,
188 int idx)
189{
190 int shift = 64 - 32;
191 s64 prev_raw_count, new_raw_count;
192 s64 delta;
193
194again:
195 prev_raw_count = atomic64_read(&hwc->prev_count);
196 new_raw_count = armpmu->read_counter(idx);
197
198 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
199 new_raw_count) != prev_raw_count)
200 goto again;
201
202 delta = (new_raw_count << shift) - (prev_raw_count << shift);
203 delta >>= shift;
204
205 atomic64_add(delta, &event->count);
206 atomic64_sub(delta, &hwc->period_left);
207
208 return new_raw_count;
209}
210
211static void
212armpmu_disable(struct perf_event *event)
213{
214 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
215 struct hw_perf_event *hwc = &event->hw;
216 int idx = hwc->idx;
217
218 WARN_ON(idx < 0);
219
220 clear_bit(idx, cpuc->active_mask);
221 armpmu->disable(hwc, idx);
222
223 barrier();
224
225 armpmu_event_update(event, hwc, idx);
226 cpuc->events[idx] = NULL;
227 clear_bit(idx, cpuc->used_mask);
228
229 perf_event_update_userpage(event);
230}
231
232static void
233armpmu_read(struct perf_event *event)
234{
235 struct hw_perf_event *hwc = &event->hw;
236
237 /* Don't read disabled counters! */
238 if (hwc->idx < 0)
239 return;
240
241 armpmu_event_update(event, hwc, hwc->idx);
242}
243
244static void
245armpmu_unthrottle(struct perf_event *event)
246{
247 struct hw_perf_event *hwc = &event->hw;
248
249 /*
250 * Set the period again. Some counters can't be stopped, so when we
251 * were throttled we simply disabled the IRQ source and the counter
252 * may have been left counting. If we don't do this step then we may
253 * get an interrupt too soon or *way* too late if the overflow has
254 * happened since disabling.
255 */
256 armpmu_event_set_period(event, hwc, hwc->idx);
257 armpmu->enable(hwc, hwc->idx);
258}
259
260static int
261armpmu_enable(struct perf_event *event)
262{
263 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
264 struct hw_perf_event *hwc = &event->hw;
265 int idx;
266 int err = 0;
267
268 /* If we don't have a space for the counter then finish early. */
269 idx = armpmu->get_event_idx(cpuc, hwc);
270 if (idx < 0) {
271 err = idx;
272 goto out;
273 }
274
275 /*
276 * If there is an event in the counter we are going to use then make
277 * sure it is disabled.
278 */
279 event->hw.idx = idx;
280 armpmu->disable(hwc, idx);
281 cpuc->events[idx] = event;
282 set_bit(idx, cpuc->active_mask);
283
284 /* Set the period for the event. */
285 armpmu_event_set_period(event, hwc, idx);
286
287 /* Enable the event. */
288 armpmu->enable(hwc, idx);
289
290 /* Propagate our changes to the userspace mapping. */
291 perf_event_update_userpage(event);
292
293out:
294 return err;
295}
296
297static struct pmu pmu = {
298 .enable = armpmu_enable,
299 .disable = armpmu_disable,
300 .unthrottle = armpmu_unthrottle,
301 .read = armpmu_read,
302};
303
304static int
305validate_event(struct cpu_hw_events *cpuc,
306 struct perf_event *event)
307{
308 struct hw_perf_event fake_event = event->hw;
309
310 if (event->pmu && event->pmu != &pmu)
311 return 0;
312
313 return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
314}
315
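/*
 * Check that an event's group leader and all of its siblings could be
 * scheduled onto the PMU at the same time, by running the counter allocator
 * against a throw-away cpu_hw_events. Nothing is programmed into the
 * hardware here.
 */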
316static int
317validate_group(struct perf_event *event)
318{
319 struct perf_event *sibling, *leader = event->group_leader;
320 struct cpu_hw_events fake_pmu;
321
322 memset(&fake_pmu, 0, sizeof(fake_pmu));
323
324 if (!validate_event(&fake_pmu, leader))
325 return -ENOSPC;
326
327 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
328 if (!validate_event(&fake_pmu, sibling))
329 return -ENOSPC;
330 }
331
332 if (!validate_event(&fake_pmu, event))
333 return -ENOSPC;
334
335 return 0;
336}
337
338static int
339armpmu_reserve_hardware(void)
340{
341	int i, err = -ENODEV, irq;
342
343 pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
344 if (IS_ERR(pmu_device)) {
345		pr_warning("unable to reserve pmu\n");
346		return PTR_ERR(pmu_device);
347 }
348
349	init_pmu(ARM_PMU_DEVICE_CPU);
350
351	if (pmu_device->num_resources < 1) {
352 pr_err("no irqs for PMUs defined\n");
353 return -ENODEV;
354 }
355
356 for (i = 0; i < pmu_device->num_resources; ++i) {
357 irq = platform_get_irq(pmu_device, i);
358 if (irq < 0)
359 continue;
360
361 err = request_irq(irq, armpmu->handle_irq,
362 IRQF_DISABLED | IRQF_NOBALANCING,
363 "armpmu", NULL);
364		if (err) {
365 pr_warning("unable to request IRQ%d for ARM perf "
366 "counters\n", irq);
367 break;
368 }
369 }
370
371 if (err) {
372 for (i = i - 1; i >= 0; --i) {
373 irq = platform_get_irq(pmu_device, i);
374 if (irq >= 0)
375 free_irq(irq, NULL);
376 }
377 release_pmu(pmu_device);
378 pmu_device = NULL;
379 }
380
381 return err;
382}
383
384static void
385armpmu_release_hardware(void)
386{
387	int i, irq;
388
389 for (i = pmu_device->num_resources - 1; i >= 0; --i) {
390 irq = platform_get_irq(pmu_device, i);
391 if (irq >= 0)
392 free_irq(irq, NULL);
393 }
394 armpmu->stop();
395
396 release_pmu(pmu_device);
397 pmu_device = NULL;
398}
399
400static atomic_t active_events = ATOMIC_INIT(0);
401static DEFINE_MUTEX(pmu_reserve_mutex);
402
403static void
404hw_perf_event_destroy(struct perf_event *event)
405{
406 if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
407 armpmu_release_hardware();
408 mutex_unlock(&pmu_reserve_mutex);
409 }
410}
411
412static int
413__hw_perf_event_init(struct perf_event *event)
414{
415 struct hw_perf_event *hwc = &event->hw;
416 int mapping, err;
417
418 /* Decode the generic type into an ARM event identifier. */
419 if (PERF_TYPE_HARDWARE == event->attr.type) {
420 mapping = armpmu->event_map(event->attr.config);
421 } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
422 mapping = armpmu_map_cache_event(event->attr.config);
423 } else if (PERF_TYPE_RAW == event->attr.type) {
424 mapping = armpmu->raw_event(event->attr.config);
425 } else {
426 pr_debug("event type %x not supported\n", event->attr.type);
427 return -EOPNOTSUPP;
428 }
429
430 if (mapping < 0) {
431 pr_debug("event %x:%llx not supported\n", event->attr.type,
432 event->attr.config);
433 return mapping;
434 }
435
436 /*
437 * Check whether we need to exclude the counter from certain modes.
438 * The ARM performance counters are on all of the time so if someone
439 * has asked us for some excludes then we have to fail.
440 */
441 if (event->attr.exclude_kernel || event->attr.exclude_user ||
442 event->attr.exclude_hv || event->attr.exclude_idle) {
443 pr_debug("ARM performance counters do not support "
444 "mode exclusion\n");
445 return -EPERM;
446 }
447
448 /*
449 * We don't assign an index until we actually place the event onto
450 * hardware. Use -1 to signify that we haven't decided where to put it
451	 * yet. For SMP systems, each core has its own PMU so we can't do any
452 * clever allocation or constraints checking at this point.
453 */
454 hwc->idx = -1;
455
456 /*
457 * Store the event encoding into the config_base field. config and
458 * event_base are unused as the only 2 things we need to know are
459 * the event mapping and the counter to use. The counter to use is
460	 * also the index and the config_base is the event type.
461 */
462 hwc->config_base = (unsigned long)mapping;
463 hwc->config = 0;
464 hwc->event_base = 0;
465
466 if (!hwc->sample_period) {
467 hwc->sample_period = armpmu->max_period;
468 hwc->last_period = hwc->sample_period;
469 atomic64_set(&hwc->period_left, hwc->sample_period);
470 }
471
472 err = 0;
473 if (event->group_leader != event) {
474 err = validate_group(event);
475 if (err)
476 return -EINVAL;
477 }
478
479 return err;
480}
481
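/*
 * The PMU hardware (the platform device and its IRQs) is only claimed when
 * the first event is created and is released again when the last user goes
 * away; active_events together with pmu_reserve_mutex implements that
 * reference count, with hw_perf_event_destroy() as the release path.
 */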
482const struct pmu *
483hw_perf_event_init(struct perf_event *event)
484{
485 int err = 0;
486
487 if (!armpmu)
488 return ERR_PTR(-ENODEV);
489
490 event->destroy = hw_perf_event_destroy;
491
492 if (!atomic_inc_not_zero(&active_events)) {
493 if (atomic_read(&active_events) > perf_max_events) {
494 atomic_dec(&active_events);
495 return ERR_PTR(-ENOSPC);
496 }
497
498 mutex_lock(&pmu_reserve_mutex);
499 if (atomic_read(&active_events) == 0) {
500 err = armpmu_reserve_hardware();
501 }
502
503 if (!err)
504 atomic_inc(&active_events);
505 mutex_unlock(&pmu_reserve_mutex);
506 }
507
508 if (err)
509 return ERR_PTR(err);
510
511 err = __hw_perf_event_init(event);
512 if (err)
513 hw_perf_event_destroy(event);
514
515 return err ? ERR_PTR(err) : &pmu;
516}
517
518void
519hw_perf_enable(void)
520{
521 /* Enable all of the perf events on hardware. */
522 int idx;
523 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
524
525 if (!armpmu)
526 return;
527
528 for (idx = 0; idx <= armpmu->num_events; ++idx) {
529 struct perf_event *event = cpuc->events[idx];
530
531 if (!event)
532 continue;
533
534 armpmu->enable(&event->hw, idx);
535 }
536
537 armpmu->start();
538}
539
540void
541hw_perf_disable(void)
542{
543 if (armpmu)
544 armpmu->stop();
545}
546
547/*
548 * ARMv6 Performance counter handling code.
549 *
550 * ARMv6 has 2 configurable performance counters and a single cycle counter.
551 * They all share a single reset bit but can be written to zero so we can use
552 * that for a reset.
553 *
554 * The counters can't be individually enabled or disabled so when we remove
555 * one event and replace it with another we could get spurious counts from the
556 * wrong event. However, we can take advantage of the fact that the
557 * performance counters can export events to the event bus, and the event bus
558 * itself can be monitored. This requires that we *don't* export the events to
559 * the event bus. The procedure for disabling a configurable counter is:
560 * - change the counter to count the ETMEXTOUT[0] signal (0x20). This
561 * effectively stops the counter from counting.
562	 * - disable the counter's interrupt generation (each counter has its
563 * own interrupt enable bit).
564 * Once stopped, the counter value can be written as 0 to reset.
565 *
566 * To enable a counter:
567 * - enable the counter's interrupt generation.
568 * - set the new event type.
569 *
570 * Note: the dedicated cycle counter only counts cycles and can't be
571 * enabled/disabled independently of the others. When we want to disable the
572 * cycle counter, we have to just disable the interrupt reporting and start
573 * ignoring that counter. When re-enabling, we have to reset the value and
574 * enable the interrupt.
575 */
576
577enum armv6_perf_types {
578 ARMV6_PERFCTR_ICACHE_MISS = 0x0,
579 ARMV6_PERFCTR_IBUF_STALL = 0x1,
580 ARMV6_PERFCTR_DDEP_STALL = 0x2,
581 ARMV6_PERFCTR_ITLB_MISS = 0x3,
582 ARMV6_PERFCTR_DTLB_MISS = 0x4,
583 ARMV6_PERFCTR_BR_EXEC = 0x5,
584 ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
585 ARMV6_PERFCTR_INSTR_EXEC = 0x7,
586 ARMV6_PERFCTR_DCACHE_HIT = 0x9,
587 ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
588 ARMV6_PERFCTR_DCACHE_MISS = 0xB,
589 ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
590 ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
591 ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
592 ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
593 ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
594 ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
595 ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
596 ARMV6_PERFCTR_NOP = 0x20,
597};
598
599enum armv6_counters {
600 ARMV6_CYCLE_COUNTER = 1,
601 ARMV6_COUNTER0,
602 ARMV6_COUNTER1,
603};
604
605/*
606 * The hardware events that we support. We do support cache operations but
607 * we have harvard caches and no way to combine instruction and data
608 * accesses/misses in hardware.
609 */
610static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
611 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
612 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
613 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
614 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
615 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
616 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
617 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
618};
619
620static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
621 [PERF_COUNT_HW_CACHE_OP_MAX]
622 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
623 [C(L1D)] = {
624 /*
625 * The performance counters don't differentiate between read
626 * and write accesses/misses so this isn't strictly correct,
627 * but it's the best we can do. Writes and reads get
628 * combined.
629 */
630 [C(OP_READ)] = {
631 [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
632 [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
633 },
634 [C(OP_WRITE)] = {
635 [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
636 [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
637 },
638 [C(OP_PREFETCH)] = {
639 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
640 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
641 },
642 },
643 [C(L1I)] = {
644 [C(OP_READ)] = {
645 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
646 [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
647 },
648 [C(OP_WRITE)] = {
649 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
650 [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
651 },
652 [C(OP_PREFETCH)] = {
653 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
654 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
655 },
656 },
657 [C(LL)] = {
658 [C(OP_READ)] = {
659 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
660 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
661 },
662 [C(OP_WRITE)] = {
663 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
664 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
665 },
666 [C(OP_PREFETCH)] = {
667 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
668 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
669 },
670 },
671 [C(DTLB)] = {
672 /*
673 * The ARM performance counters can count micro DTLB misses,
674 * micro ITLB misses and main TLB misses. There isn't an event
675 * for TLB misses, so use the micro misses here and if users
676 * want the main TLB misses they can use a raw counter.
677 */
678 [C(OP_READ)] = {
679 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
680 [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
681 },
682 [C(OP_WRITE)] = {
683 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
684 [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
685 },
686 [C(OP_PREFETCH)] = {
687 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
688 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
689 },
690 },
691 [C(ITLB)] = {
692 [C(OP_READ)] = {
693 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
694 [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
695 },
696 [C(OP_WRITE)] = {
697 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
698 [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
699 },
700 [C(OP_PREFETCH)] = {
701 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
702 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
703 },
704 },
705 [C(BPU)] = {
706 [C(OP_READ)] = {
707 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
708 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
709 },
710 [C(OP_WRITE)] = {
711 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
712 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
713 },
714 [C(OP_PREFETCH)] = {
715 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
716 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
717 },
718 },
719};
720
721enum armv6mpcore_perf_types {
722 ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
723 ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
724 ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
725 ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
726 ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
727 ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
728 ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
729 ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
730 ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
731 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
732 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
733 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
734 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
735 ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
736 ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
737 ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
738 ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
739 ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
740 ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
741 ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
742};
743
744/*
745 * The hardware events that we support. We do support cache operations but
746 * we have harvard caches and no way to combine instruction and data
747 * accesses/misses in hardware.
748 */
749static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
750 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
751 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
752 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
753 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
754 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
755 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
756 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
757};
758
759static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
760 [PERF_COUNT_HW_CACHE_OP_MAX]
761 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
762 [C(L1D)] = {
763 [C(OP_READ)] = {
764 [C(RESULT_ACCESS)] =
765 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
766 [C(RESULT_MISS)] =
767 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
768 },
769 [C(OP_WRITE)] = {
770 [C(RESULT_ACCESS)] =
771 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
772 [C(RESULT_MISS)] =
773 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
774 },
775 [C(OP_PREFETCH)] = {
776 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
777 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
778 },
779 },
780 [C(L1I)] = {
781 [C(OP_READ)] = {
782 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
783 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
784 },
785 [C(OP_WRITE)] = {
786 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
787 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
788 },
789 [C(OP_PREFETCH)] = {
790 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
791 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
792 },
793 },
794 [C(LL)] = {
795 [C(OP_READ)] = {
796 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
797 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
798 },
799 [C(OP_WRITE)] = {
800 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
801 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
802 },
803 [C(OP_PREFETCH)] = {
804 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
805 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
806 },
807 },
808 [C(DTLB)] = {
809 /*
810 * The ARM performance counters can count micro DTLB misses,
811 * micro ITLB misses and main TLB misses. There isn't an event
812 * for TLB misses, so use the micro misses here and if users
813 * want the main TLB misses they can use a raw counter.
814 */
815 [C(OP_READ)] = {
816 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
817 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
818 },
819 [C(OP_WRITE)] = {
820 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
821 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
822 },
823 [C(OP_PREFETCH)] = {
824 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
825 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
826 },
827 },
828 [C(ITLB)] = {
829 [C(OP_READ)] = {
830 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
831 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
832 },
833 [C(OP_WRITE)] = {
834 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
835 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
836 },
837 [C(OP_PREFETCH)] = {
838 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
839 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
840 },
841 },
842 [C(BPU)] = {
843 [C(OP_READ)] = {
844 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
845 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
846 },
847 [C(OP_WRITE)] = {
848 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
849 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
850 },
851 [C(OP_PREFETCH)] = {
852 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
853 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
854 },
855 },
856};
857
858static inline unsigned long
859armv6_pmcr_read(void)
860{
861 u32 val;
862 asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
863 return val;
864}
865
866static inline void
867armv6_pmcr_write(unsigned long val)
868{
869 asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
870}
871
872#define ARMV6_PMCR_ENABLE (1 << 0)
873#define ARMV6_PMCR_CTR01_RESET (1 << 1)
874#define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
875#define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
876#define ARMV6_PMCR_COUNT0_IEN (1 << 4)
877#define ARMV6_PMCR_COUNT1_IEN (1 << 5)
878#define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
879#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
880#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
881#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
882#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
883#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
884#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
885#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
886
887#define ARMV6_PMCR_OVERFLOWED_MASK \
888 (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
889 ARMV6_PMCR_CCOUNT_OVERFLOW)
890
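/*
 * Putting the bits above together, enabling counter 0 to count event 'evt'
 * with its overflow interrupt unmasked amounts to something like:
 *
 *	val = armv6_pmcr_read();
 *	val &= ~ARMV6_PMCR_EVT_COUNT0_MASK;
 *	val |= (evt << ARMV6_PMCR_EVT_COUNT0_SHIFT) | ARMV6_PMCR_COUNT0_IEN;
 *	armv6_pmcr_write(val | ARMV6_PMCR_ENABLE);
 *
 * which is essentially what armv6pmu_enable_event() and armv6pmu_start()
 * do below, with the read/modify/write sequence done under pmu_lock.
 */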
891static inline int
892armv6_pmcr_has_overflowed(unsigned long pmcr)
893{
894 return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK);
895}
896
897static inline int
898armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
899 enum armv6_counters counter)
900{
901 int ret = 0;
902
903 if (ARMV6_CYCLE_COUNTER == counter)
904 ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
905 else if (ARMV6_COUNTER0 == counter)
906 ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
907 else if (ARMV6_COUNTER1 == counter)
908 ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
909 else
910 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
911
912 return ret;
913}
914
915static inline u32
916armv6pmu_read_counter(int counter)
917{
918 unsigned long value = 0;
919
920 if (ARMV6_CYCLE_COUNTER == counter)
921 asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
922 else if (ARMV6_COUNTER0 == counter)
923 asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
924 else if (ARMV6_COUNTER1 == counter)
925 asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
926 else
927 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
928
929 return value;
930}
931
932static inline void
933armv6pmu_write_counter(int counter,
934 u32 value)
935{
936 if (ARMV6_CYCLE_COUNTER == counter)
937 asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
938 else if (ARMV6_COUNTER0 == counter)
939 asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
940 else if (ARMV6_COUNTER1 == counter)
941 asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
942 else
943 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
944}
945
946void
947armv6pmu_enable_event(struct hw_perf_event *hwc,
948 int idx)
949{
950 unsigned long val, mask, evt, flags;
951
952 if (ARMV6_CYCLE_COUNTER == idx) {
953 mask = 0;
954 evt = ARMV6_PMCR_CCOUNT_IEN;
955 } else if (ARMV6_COUNTER0 == idx) {
956 mask = ARMV6_PMCR_EVT_COUNT0_MASK;
957 evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
958 ARMV6_PMCR_COUNT0_IEN;
959 } else if (ARMV6_COUNTER1 == idx) {
960 mask = ARMV6_PMCR_EVT_COUNT1_MASK;
961 evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
962 ARMV6_PMCR_COUNT1_IEN;
963 } else {
964 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
965 return;
966 }
967
968 /*
969 * Mask out the current event and set the counter to count the event
970 * that we're interested in.
971 */
972 spin_lock_irqsave(&pmu_lock, flags);
973 val = armv6_pmcr_read();
974 val &= ~mask;
975 val |= evt;
976 armv6_pmcr_write(val);
977 spin_unlock_irqrestore(&pmu_lock, flags);
978}
979
980static irqreturn_t
981armv6pmu_handle_irq(int irq_num,
982 void *dev)
983{
984 unsigned long pmcr = armv6_pmcr_read();
985 struct perf_sample_data data;
986 struct cpu_hw_events *cpuc;
987 struct pt_regs *regs;
988 int idx;
989
990 if (!armv6_pmcr_has_overflowed(pmcr))
991 return IRQ_NONE;
992
993 regs = get_irq_regs();
994
995 /*
996 * The interrupts are cleared by writing the overflow flags back to
997 * the control register. All of the other bits don't have any effect
998 * if they are rewritten, so write the whole value back.
999 */
1000 armv6_pmcr_write(pmcr);
1001
1002	perf_sample_data_init(&data, 0);
1003
1004 cpuc = &__get_cpu_var(cpu_hw_events);
1005 for (idx = 0; idx <= armpmu->num_events; ++idx) {
1006 struct perf_event *event = cpuc->events[idx];
1007 struct hw_perf_event *hwc;
1008
1009 if (!test_bit(idx, cpuc->active_mask))
1010 continue;
1011
1012 /*
1013 * We have a single interrupt for all counters. Check that
1014 * each counter has overflowed before we process it.
1015 */
1016 if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
1017 continue;
1018
1019 hwc = &event->hw;
1020 armpmu_event_update(event, hwc, idx);
1021 data.period = event->hw.last_period;
1022 if (!armpmu_event_set_period(event, hwc, idx))
1023 continue;
1024
1025 if (perf_event_overflow(event, 0, &data, regs))
1026 armpmu->disable(hwc, idx);
1027 }
1028
1029 /*
1030 * Handle the pending perf events.
1031 *
1032 * Note: this call *must* be run with interrupts enabled. For
1033 * platforms that can have the PMU interrupts raised as a PMI, this
1034 * will not work.
1035 */
1036 perf_event_do_pending();
1037
1038 return IRQ_HANDLED;
1039}
1040
1041static void
1042armv6pmu_start(void)
1043{
1044 unsigned long flags, val;
1045
1046 spin_lock_irqsave(&pmu_lock, flags);
1047 val = armv6_pmcr_read();
1048 val |= ARMV6_PMCR_ENABLE;
1049 armv6_pmcr_write(val);
1050 spin_unlock_irqrestore(&pmu_lock, flags);
1051}
1052
1053void
1054armv6pmu_stop(void)
1055{
1056 unsigned long flags, val;
1057
1058 spin_lock_irqsave(&pmu_lock, flags);
1059 val = armv6_pmcr_read();
1060 val &= ~ARMV6_PMCR_ENABLE;
1061 armv6_pmcr_write(val);
1062 spin_unlock_irqrestore(&pmu_lock, flags);
1063}
1064
1065static inline int
1066armv6pmu_event_map(int config)
1067{
1068 int mapping = armv6_perf_map[config];
1069 if (HW_OP_UNSUPPORTED == mapping)
1070 mapping = -EOPNOTSUPP;
1071 return mapping;
1072}
1073
1074static inline int
1075armv6mpcore_pmu_event_map(int config)
1076{
1077 int mapping = armv6mpcore_perf_map[config];
1078 if (HW_OP_UNSUPPORTED == mapping)
1079 mapping = -EOPNOTSUPP;
1080 return mapping;
1081}
1082
1083static u64
1084armv6pmu_raw_event(u64 config)
1085{
1086 return config & 0xff;
1087}
1088
1089static int
1090armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
1091 struct hw_perf_event *event)
1092{
1093 /* Always place a cycle counter into the cycle counter. */
1094 if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
1095 if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
1096 return -EAGAIN;
1097
1098 return ARMV6_CYCLE_COUNTER;
1099 } else {
1100 /*
1101 * For anything other than a cycle counter, try and use
1102 * counter0 and counter1.
1103 */
1104 if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
1105 return ARMV6_COUNTER1;
1106 }
1107
1108 if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
1109 return ARMV6_COUNTER0;
1110 }
1111
1112 /* The counters are all in use. */
1113 return -EAGAIN;
1114 }
1115}
1116
1117static void
1118armv6pmu_disable_event(struct hw_perf_event *hwc,
1119 int idx)
1120{
1121 unsigned long val, mask, evt, flags;
1122
1123 if (ARMV6_CYCLE_COUNTER == idx) {
1124 mask = ARMV6_PMCR_CCOUNT_IEN;
1125 evt = 0;
1126 } else if (ARMV6_COUNTER0 == idx) {
1127 mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
1128 evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
1129 } else if (ARMV6_COUNTER1 == idx) {
1130 mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
1131 evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
1132 } else {
1133 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
1134 return;
1135 }
1136
1137 /*
1138 * Mask out the current event and set the counter to count the number
1139 * of ETM bus signal assertion cycles. The external reporting should
1140 * be disabled and so this should never increment.
1141 */
1142 spin_lock_irqsave(&pmu_lock, flags);
1143 val = armv6_pmcr_read();
1144 val &= ~mask;
1145 val |= evt;
1146 armv6_pmcr_write(val);
1147 spin_unlock_irqrestore(&pmu_lock, flags);
1148}
1149
1150static void
1151armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
1152 int idx)
1153{
1154 unsigned long val, mask, flags, evt = 0;
1155
1156 if (ARMV6_CYCLE_COUNTER == idx) {
1157 mask = ARMV6_PMCR_CCOUNT_IEN;
1158 } else if (ARMV6_COUNTER0 == idx) {
1159 mask = ARMV6_PMCR_COUNT0_IEN;
1160 } else if (ARMV6_COUNTER1 == idx) {
1161 mask = ARMV6_PMCR_COUNT1_IEN;
1162 } else {
1163 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
1164 return;
1165 }
1166
1167 /*
1168 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
1169 * simply disable the interrupt reporting.
1170 */
1171 spin_lock_irqsave(&pmu_lock, flags);
1172 val = armv6_pmcr_read();
1173 val &= ~mask;
1174 val |= evt;
1175 armv6_pmcr_write(val);
1176 spin_unlock_irqrestore(&pmu_lock, flags);
1177}
1178
1179static const struct arm_pmu armv6pmu = {
1180	.id			= ARM_PERF_PMU_ID_V6,
1181 .handle_irq = armv6pmu_handle_irq,
1182 .enable = armv6pmu_enable_event,
1183 .disable = armv6pmu_disable_event,
1184 .event_map = armv6pmu_event_map,
1185 .raw_event = armv6pmu_raw_event,
1186 .read_counter = armv6pmu_read_counter,
1187 .write_counter = armv6pmu_write_counter,
1188 .get_event_idx = armv6pmu_get_event_idx,
1189 .start = armv6pmu_start,
1190 .stop = armv6pmu_stop,
1191 .num_events = 3,
1192 .max_period = (1LLU << 32) - 1,
1193};
1194
1195/*
1196 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
1197 * that some of the events have different enumerations and that there is no
1198 * *hack* to stop the programmable counters. To stop the counters we simply
1199 * disable the interrupt reporting and update the event. When unthrottling we
1200 * reset the period and enable the interrupt reporting.
1201 */
1202static const struct arm_pmu armv6mpcore_pmu = {
1203	.id			= ARM_PERF_PMU_ID_V6MP,
1204 .handle_irq = armv6pmu_handle_irq,
1205 .enable = armv6pmu_enable_event,
1206 .disable = armv6mpcore_pmu_disable_event,
1207 .event_map = armv6mpcore_pmu_event_map,
1208 .raw_event = armv6pmu_raw_event,
1209 .read_counter = armv6pmu_read_counter,
1210 .write_counter = armv6pmu_write_counter,
1211 .get_event_idx = armv6pmu_get_event_idx,
1212 .start = armv6pmu_start,
1213 .stop = armv6pmu_stop,
1214 .num_events = 3,
1215 .max_period = (1LLU << 32) - 1,
1216};
1217
1218/*
1219 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
1220 *
1221 * Copied from ARMv6 code, with the low level code inspired
1222 * by the ARMv7 Oprofile code.
1223 *
1224 * Cortex-A8 has up to 4 configurable performance counters and
1225 * a single cycle counter.
1226 * Cortex-A9 has up to 31 configurable performance counters and
1227 * a single cycle counter.
1228 *
1229 * All counters can be enabled/disabled and IRQ masked separately. The cycle
1230 * counter and all 4 performance counters together can be reset separately.
1231 */
1232
1233/* Common ARMv7 event types */
1234enum armv7_perf_types {
1235 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
1236 ARMV7_PERFCTR_IFETCH_MISS = 0x01,
1237 ARMV7_PERFCTR_ITLB_MISS = 0x02,
1238 ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
1239 ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
1240 ARMV7_PERFCTR_DTLB_REFILL = 0x05,
1241 ARMV7_PERFCTR_DREAD = 0x06,
1242 ARMV7_PERFCTR_DWRITE = 0x07,
1243
1244 ARMV7_PERFCTR_EXC_TAKEN = 0x09,
1245 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
1246 ARMV7_PERFCTR_CID_WRITE = 0x0B,
1247 /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
1248 * It counts:
1249 * - all branch instructions,
1250 * - instructions that explicitly write the PC,
1251 * - exception generating instructions.
1252 */
1253 ARMV7_PERFCTR_PC_WRITE = 0x0C,
1254 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
1255 ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
1256 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
1257 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
1258
1259 ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,
1260
1261 ARMV7_PERFCTR_CPU_CYCLES = 0xFF
1262};
1263
1264/* ARMv7 Cortex-A8 specific event types */
1265enum armv7_a8_perf_types {
1266 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
1267
1268 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
1269
1270 ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
1271 ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
1272 ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
1273 ARMV7_PERFCTR_L2_ACCESS = 0x43,
1274 ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
1275 ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
1276 ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
1277 ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
1278 ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
1279 ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
1280 ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
1281 ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
1282 ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
1283 ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
1284 ARMV7_PERFCTR_L2_NEON = 0x4E,
1285 ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
1286 ARMV7_PERFCTR_L1_INST = 0x50,
1287 ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
1288 ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
1289 ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
1290 ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
1291 ARMV7_PERFCTR_OP_EXECUTED = 0x55,
1292 ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
1293 ARMV7_PERFCTR_CYCLES_INST = 0x57,
1294 ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
1295 ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
1296 ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
1297
1298 ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
1299 ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
1300 ARMV7_PERFCTR_PMU_EVENTS = 0x72,
1301};
1302
1303/* ARMv7 Cortex-A9 specific event types */
1304enum armv7_a9_perf_types {
1305 ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
1306 ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
1307 ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,
1308
1309 ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
1310 ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
1311
1312 ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
1313 ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
1314 ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
1315 ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
1316 ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
1317 ARMV7_PERFCTR_DATA_EVICTION = 0x65,
1318 ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
1319 ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
1320 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
1321
1322 ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
1323
1324 ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
1325 ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
1326 ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
1327 ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
1328 ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
1329
1330 ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
1331 ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
1332 ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
1333 ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
1334 ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
1335 ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
1336 ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
1337
1338 ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
1339 ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
1340
1341 ARMV7_PERFCTR_ISB_INST = 0x90,
1342 ARMV7_PERFCTR_DSB_INST = 0x91,
1343 ARMV7_PERFCTR_DMB_INST = 0x92,
1344 ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
1345
1346 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
1347 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
1348 ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
1349 ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
1350 ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
1351 ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
1352};
1353
1354/*
1355 * Cortex-A8 HW events mapping
1356 *
1357 * The hardware events that we support. We do support cache operations but
1358 * we have harvard caches and no way to combine instruction and data
1359 * accesses/misses in hardware.
1360 */
1361static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
1362 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
1363 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
1364 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
1365 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
1366 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
1367 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1368 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
1369};
1370
1371static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
1372 [PERF_COUNT_HW_CACHE_OP_MAX]
1373 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1374 [C(L1D)] = {
1375 /*
1376 * The performance counters don't differentiate between read
1377 * and write accesses/misses so this isn't strictly correct,
1378 * but it's the best we can do. Writes and reads get
1379 * combined.
1380 */
1381 [C(OP_READ)] = {
1382 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1383 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1384 },
1385 [C(OP_WRITE)] = {
1386 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1387 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1388 },
1389 [C(OP_PREFETCH)] = {
1390 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1391 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1392 },
1393 },
1394 [C(L1I)] = {
1395 [C(OP_READ)] = {
1396 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
1397 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
1398 },
1399 [C(OP_WRITE)] = {
1400 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
1401 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
1402 },
1403 [C(OP_PREFETCH)] = {
1404 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1405 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1406 },
1407 },
1408 [C(LL)] = {
1409 [C(OP_READ)] = {
1410 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
1411 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
1412 },
1413 [C(OP_WRITE)] = {
1414 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
1415 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
1416 },
1417 [C(OP_PREFETCH)] = {
1418 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1419 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1420 },
1421 },
1422 [C(DTLB)] = {
1423 /*
1424 * Only ITLB misses and DTLB refills are supported.
1425	 * If users want anything other than DTLB refill misses, a raw
1426	 * counter must be used.
1427 */
1428 [C(OP_READ)] = {
1429 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1430 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1431 },
1432 [C(OP_WRITE)] = {
1433 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1434 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1435 },
1436 [C(OP_PREFETCH)] = {
1437 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1438 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1439 },
1440 },
1441 [C(ITLB)] = {
1442 [C(OP_READ)] = {
1443 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1444 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1445 },
1446 [C(OP_WRITE)] = {
1447 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1448 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1449 },
1450 [C(OP_PREFETCH)] = {
1451 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1452 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1453 },
1454 },
1455 [C(BPU)] = {
1456 [C(OP_READ)] = {
1457 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1458 [C(RESULT_MISS)]
1459 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1460 },
1461 [C(OP_WRITE)] = {
1462 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1463 [C(RESULT_MISS)]
1464 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1465 },
1466 [C(OP_PREFETCH)] = {
1467 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1468 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1469 },
1470 },
1471};
1472
1473/*
1474 * Cortex-A9 HW events mapping
1475 */
1476static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
1477 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
1478 [PERF_COUNT_HW_INSTRUCTIONS] =
1479 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
1480 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
1481 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
1482 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
1483 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1484 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
1485};
1486
1487static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
1488 [PERF_COUNT_HW_CACHE_OP_MAX]
1489 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1490 [C(L1D)] = {
1491 /*
1492 * The performance counters don't differentiate between read
1493 * and write accesses/misses so this isn't strictly correct,
1494 * but it's the best we can do. Writes and reads get
1495 * combined.
1496 */
1497 [C(OP_READ)] = {
1498 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1499 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1500 },
1501 [C(OP_WRITE)] = {
1502 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1503 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1504 },
1505 [C(OP_PREFETCH)] = {
1506 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1507 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1508 },
1509 },
1510 [C(L1I)] = {
1511 [C(OP_READ)] = {
1512 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1513 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
1514 },
1515 [C(OP_WRITE)] = {
1516 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1517 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
1518 },
1519 [C(OP_PREFETCH)] = {
1520 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1521 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1522 },
1523 },
1524 [C(LL)] = {
1525 [C(OP_READ)] = {
1526 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1527 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1528 },
1529 [C(OP_WRITE)] = {
1530 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1531 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1532 },
1533 [C(OP_PREFETCH)] = {
1534 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1535 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1536 },
1537 },
1538 [C(DTLB)] = {
1539 /*
1540 * Only ITLB misses and DTLB refills are supported.
1541	 * If users want anything other than DTLB refill misses, a raw
1542	 * counter must be used.
1543 */
1544 [C(OP_READ)] = {
1545 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1546 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1547 },
1548 [C(OP_WRITE)] = {
1549 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1550 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1551 },
1552 [C(OP_PREFETCH)] = {
1553 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1554 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1555 },
1556 },
1557 [C(ITLB)] = {
1558 [C(OP_READ)] = {
1559 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1560 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1561 },
1562 [C(OP_WRITE)] = {
1563 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1564 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1565 },
1566 [C(OP_PREFETCH)] = {
1567 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1568 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1569 },
1570 },
1571 [C(BPU)] = {
1572 [C(OP_READ)] = {
1573 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1574 [C(RESULT_MISS)]
1575 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1576 },
1577 [C(OP_WRITE)] = {
1578 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1579 [C(RESULT_MISS)]
1580 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1581 },
1582 [C(OP_PREFETCH)] = {
1583 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1584 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1585 },
1586 },
1587};
1588
1589/*
1590 * Perf Events counters
1591 */
1592enum armv7_counters {
1593 ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */
1594 ARMV7_COUNTER0 = 2, /* First event counter */
1595};
1596
1597/*
1598 * The cycle counter is ARMV7_CYCLE_COUNTER.
1599 * The first event counter is ARMV7_COUNTER0.
1600 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
1601 */
1602#define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
1603
1604/*
1605 * ARMv7 low level PMNC access
1606 */
1607
1608/*
1609 * Per-CPU PMNC: config reg
1610 */
1611#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
1612#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
1613#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
1614#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
1615#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
1616#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
1617#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
1618#define ARMV7_PMNC_N_MASK 0x1f
1619#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
1620
1621/*
1622 * Available counters
1623 */
1624#define ARMV7_CNT0 0 /* First event counter */
1625#define ARMV7_CCNT 31 /* Cycle counter */
1626
1627/* Perf Event to low level counters mapping */
1628#define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
1629
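/*
 * In other words: perf index ARMV7_COUNTER0 (2) maps to hardware event
 * counter 0, index 3 to counter 1, and so on, while the cycle counter keeps
 * its dedicated bit position ARMV7_CCNT (31) in the control registers.
 */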
1630/*
1631 * CNTENS: counters enable reg
1632 */
1633#define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1634#define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
1635
1636/*
1637 * CNTENC: counters disable reg
1638 */
1639#define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1640#define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
1641
1642/*
1643 * INTENS: counters overflow interrupt enable reg
1644 */
1645#define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1646#define ARMV7_INTENS_C (1 << ARMV7_CCNT)
1647
1648/*
1649 * INTENC: counters overflow interrupt disable reg
1650 */
1651#define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1652#define ARMV7_INTENC_C (1 << ARMV7_CCNT)
1653
1654/*
1655 * EVTSEL: Event selection reg
1656 */
1657#define ARMV7_EVTSEL_MASK	0xff	 /* Mask for writable bits */
1658
1659/*
1660 * SELECT: Counter selection reg
1661 */
1662#define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */
1663
1664/*
1665 * FLAG: counters overflow flag status reg
1666 */
1667#define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1668#define ARMV7_FLAG_C (1 << ARMV7_CCNT)
1669#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
1670#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
1671
1672static inline unsigned long armv7_pmnc_read(void)
1673{
1674 u32 val;
1675 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
1676 return val;
1677}
1678
1679static inline void armv7_pmnc_write(unsigned long val)
1680{
1681 val &= ARMV7_PMNC_MASK;
1682 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
1683}
1684
1685static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
1686{
1687 return pmnc & ARMV7_OVERFLOWED_MASK;
1688}
1689
1690static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
1691 enum armv7_counters counter)
1692{
1693 int ret;
1694
1695 if (counter == ARMV7_CYCLE_COUNTER)
1696 ret = pmnc & ARMV7_FLAG_C;
1697 else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
1698 ret = pmnc & ARMV7_FLAG_P(counter);
1699 else
1700 pr_err("CPU%u checking wrong counter %d overflow status\n",
1701 smp_processor_id(), counter);
1702
1703 return ret;
1704}
1705
1706static inline int armv7_pmnc_select_counter(unsigned int idx)
1707{
1708 u32 val;
1709
1710 if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
1711 pr_err("CPU%u selecting wrong PMNC counter"
1712 " %d\n", smp_processor_id(), idx);
1713 return -1;
1714 }
1715
1716 val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
1717 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
1718
1719 return idx;
1720}
1721
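/*
 * The ARMv7 event counters are accessed indirectly: the counter number is
 * written to the selection register (c9, c12, 5 - PMSELR) and the selected
 * counter is then read or written through c9, c13, 2 (PMXEVCNTR). The cycle
 * counter has its own register (c9, c13, 0). The read/write helpers below
 * wrap that select-then-access sequence.
 */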
1722static inline u32 armv7pmu_read_counter(int idx)
1723{
1724 unsigned long value = 0;
1725
1726 if (idx == ARMV7_CYCLE_COUNTER)
1727 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
1728 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
1729 if (armv7_pmnc_select_counter(idx) == idx)
1730 asm volatile("mrc p15, 0, %0, c9, c13, 2"
1731 : "=r" (value));
1732 } else
1733 pr_err("CPU%u reading wrong counter %d\n",
1734 smp_processor_id(), idx);
1735
1736 return value;
1737}
1738
1739static inline void armv7pmu_write_counter(int idx, u32 value)
1740{
1741 if (idx == ARMV7_CYCLE_COUNTER)
1742 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
1743 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
1744 if (armv7_pmnc_select_counter(idx) == idx)
1745 asm volatile("mcr p15, 0, %0, c9, c13, 2"
1746 : : "r" (value));
1747 } else
1748 pr_err("CPU%u writing wrong counter %d\n",
1749 smp_processor_id(), idx);
1750}
1751
1752static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
1753{
1754 if (armv7_pmnc_select_counter(idx) == idx) {
1755 val &= ARMV7_EVTSEL_MASK;
1756 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
1757 }
1758}
1759
1760static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
1761{
1762 u32 val;
1763
1764 if ((idx != ARMV7_CYCLE_COUNTER) &&
1765 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1766 pr_err("CPU%u enabling wrong PMNC counter"
1767 " %d\n", smp_processor_id(), idx);
1768 return -1;
1769 }
1770
1771 if (idx == ARMV7_CYCLE_COUNTER)
1772 val = ARMV7_CNTENS_C;
1773 else
1774 val = ARMV7_CNTENS_P(idx);
1775
1776 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
1777
1778 return idx;
1779}
1780
1781static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
1782{
1783 u32 val;
1784
1785
1786 if ((idx != ARMV7_CYCLE_COUNTER) &&
1787 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1788 pr_err("CPU%u disabling wrong PMNC counter"
1789 " %d\n", smp_processor_id(), idx);
1790 return -1;
1791 }
1792
1793 if (idx == ARMV7_CYCLE_COUNTER)
1794 val = ARMV7_CNTENC_C;
1795 else
1796 val = ARMV7_CNTENC_P(idx);
1797
1798 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
1799
1800 return idx;
1801}
1802
1803static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
1804{
1805 u32 val;
1806
1807 if ((idx != ARMV7_CYCLE_COUNTER) &&
1808 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1809 pr_err("CPU%u enabling wrong PMNC counter"
1810 " interrupt enable %d\n", smp_processor_id(), idx);
1811 return -1;
1812 }
1813
1814 if (idx == ARMV7_CYCLE_COUNTER)
1815 val = ARMV7_INTENS_C;
1816 else
1817 val = ARMV7_INTENS_P(idx);
1818
1819 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
1820
1821 return idx;
1822}
1823
1824static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
1825{
1826 u32 val;
1827
1828 if ((idx != ARMV7_CYCLE_COUNTER) &&
1829 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1830 pr_err("CPU%u disabling wrong PMNC counter"
1831 " interrupt enable %d\n", smp_processor_id(), idx);
1832 return -1;
1833 }
1834
1835 if (idx == ARMV7_CYCLE_COUNTER)
1836 val = ARMV7_INTENC_C;
1837 else
1838 val = ARMV7_INTENC_P(idx);
1839
1840 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
1841
1842 return idx;
1843}
1844
1845static inline u32 armv7_pmnc_getreset_flags(void)
1846{
1847 u32 val;
1848
1849 /* Read */
1850 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1851
1852 /* Write to clear flags */
1853 val &= ARMV7_FLAG_MASK;
1854 asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
1855
1856 return val;
1857}
1858
1859#ifdef DEBUG
1860static void armv7_pmnc_dump_regs(void)
1861{
1862 u32 val;
1863 unsigned int cnt;
1864
1865 printk(KERN_INFO "PMNC registers dump:\n");
1866
1867 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
1868 printk(KERN_INFO "PMNC =0x%08x\n", val);
1869
1870 asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
1871 printk(KERN_INFO "CNTENS=0x%08x\n", val);
1872
1873 asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
1874 printk(KERN_INFO "INTENS=0x%08x\n", val);
1875
1876 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1877 printk(KERN_INFO "FLAGS =0x%08x\n", val);
1878
1879 asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
1880 printk(KERN_INFO "SELECT=0x%08x\n", val);
1881
1882 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
1883 printk(KERN_INFO "CCNT =0x%08x\n", val);
1884
1885 for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
1886 armv7_pmnc_select_counter(cnt);
1887 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
1888 printk(KERN_INFO "CNT[%d] count =0x%08x\n",
1889 cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
1890 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
1891 printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
1892 cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
1893 }
1894}
1895#endif
1896
1897void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
1898{
1899 unsigned long flags;
1900
1901 /*
1902 * Enable counter and interrupt, and set the counter to count
1903 * the event that we're interested in.
1904 */
1905 spin_lock_irqsave(&pmu_lock, flags);
1906
1907 /*
1908 * Disable counter
1909 */
1910 armv7_pmnc_disable_counter(idx);
1911
1912 /*
1913 * Set event (if destined for PMNx counters)
1914 * We don't need to set the event if it's a cycle count
1915 */
1916 if (idx != ARMV7_CYCLE_COUNTER)
1917 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1918
1919 /*
1920 * Enable interrupt for this counter
1921 */
1922 armv7_pmnc_enable_intens(idx);
1923
1924 /*
1925 * Enable counter
1926 */
1927 armv7_pmnc_enable_counter(idx);
1928
1929 spin_unlock_irqrestore(&pmu_lock, flags);
1930}
1931
1932static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1933{
1934 unsigned long flags;
1935
1936 /*
1937 * Disable counter and interrupt
1938 */
1939 spin_lock_irqsave(&pmu_lock, flags);
1940
1941 /*
1942 * Disable counter
1943 */
1944 armv7_pmnc_disable_counter(idx);
1945
1946 /*
1947 * Disable interrupt for this counter
1948 */
1949 armv7_pmnc_disable_intens(idx);
1950
1951 spin_unlock_irqrestore(&pmu_lock, flags);
1952}
1953
1954static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1955{
1956 unsigned long pmnc;
1957 struct perf_sample_data data;
1958 struct cpu_hw_events *cpuc;
1959 struct pt_regs *regs;
1960 int idx;
1961
1962 /*
1963 * Get and reset the IRQ flags
1964 */
1965 pmnc = armv7_pmnc_getreset_flags();
1966
1967 /*
1968 * Did an overflow occur?
1969 */
1970 if (!armv7_pmnc_has_overflowed(pmnc))
1971 return IRQ_NONE;
1972
1973 /*
1974 * Handle the counter(s) overflow(s)
1975 */
1976 regs = get_irq_regs();
1977
dc1d628a 1978 perf_sample_data_init(&data, 0);
1979
1980 cpuc = &__get_cpu_var(cpu_hw_events);
1981 for (idx = 0; idx <= armpmu->num_events; ++idx) {
1982 struct perf_event *event = cpuc->events[idx];
1983 struct hw_perf_event *hwc;
1984
1985 if (!test_bit(idx, cpuc->active_mask))
1986 continue;
1987
1988 /*
1989 * We have a single interrupt for all counters. Check that
1990 * each counter has overflowed before we process it.
1991 */
1992 if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1993 continue;
1994
1995 hwc = &event->hw;
1996 armpmu_event_update(event, hwc, idx);
1997 data.period = event->hw.last_period;
1998 if (!armpmu_event_set_period(event, hwc, idx))
1999 continue;
2000
2001 if (perf_event_overflow(event, 0, &data, regs))
2002 armpmu->disable(hwc, idx);
2003 }
2004
2005 /*
2006 * Handle the pending perf events.
2007 *
2008 * Note: this call *must* be run with interrupts enabled. For
2009 * platforms that can have the PMU interrupts raised as a PMI, this
2010 * will not work.
2011 */
2012 perf_event_do_pending();
2013
2014 return IRQ_HANDLED;
2015}
2016
2017static void armv7pmu_start(void)
2018{
2019 unsigned long flags;
2020
2021 spin_lock_irqsave(&pmu_lock, flags);
2022 /* Enable all counters */
2023 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
2024 spin_unlock_irqrestore(&pmu_lock, flags);
2025}
2026
2027static void armv7pmu_stop(void)
2028{
2029 unsigned long flags;
2030
2031 spin_lock_irqsave(&pmu_lock, flags);
2032 /* Disable all counters */
2033 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
2034 spin_unlock_irqrestore(&pmu_lock, flags);
2035}
2036
2037static inline int armv7_a8_pmu_event_map(int config)
2038{
2039 int mapping = armv7_a8_perf_map[config];
2040 if (HW_OP_UNSUPPORTED == mapping)
2041 mapping = -EOPNOTSUPP;
2042 return mapping;
2043}
2044
2045static inline int armv7_a9_pmu_event_map(int config)
2046{
2047 int mapping = armv7_a9_perf_map[config];
2048 if (HW_OP_UNSUPPORTED == mapping)
2049 mapping = -EOPNOTSUPP;
2050 return mapping;
2051}
2052
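/*
 * Raw events map directly onto the 8-bit event number programmed into the
 * event-select register by armv7_pmnc_write_evtsel(), hence the 0xff mask.
 */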
2053static u64 armv7pmu_raw_event(u64 config)
2054{
2055 return config & 0xff;
2056}
2057
2058static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
2059 struct hw_perf_event *event)
2060{
2061 int idx;
2062
 2063	/* Always place a cycle count event into the cycle counter. */
2064 if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
2065 if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
2066 return -EAGAIN;
2067
2068 return ARMV7_CYCLE_COUNTER;
2069 } else {
2070 /*
 2071	 * For anything other than a cycle counter, try to use
 2072	 * the event counters.
2073 */
2074 for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
2075 if (!test_and_set_bit(idx, cpuc->used_mask))
2076 return idx;
2077 }
2078
2079 /* The counters are all in use. */
2080 return -EAGAIN;
2081 }
2082}
2083
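/*
 * The core-specific fields (.id, .event_map and .num_events) are filled in
 * at runtime by init_hw_perf_events() once the exact Cortex core has been
 * identified from the CPUID.
 */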
2084static struct arm_pmu armv7pmu = {
2085 .handle_irq = armv7pmu_handle_irq,
2086 .enable = armv7pmu_enable_event,
2087 .disable = armv7pmu_disable_event,
2088 .raw_event = armv7pmu_raw_event,
2089 .read_counter = armv7pmu_read_counter,
2090 .write_counter = armv7pmu_write_counter,
2091 .get_event_idx = armv7pmu_get_event_idx,
2092 .start = armv7pmu_start,
2093 .stop = armv7pmu_stop,
2094 .max_period = (1LLU << 32) - 1,
2095};
2096
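/*
 * The N field of the PMNC encodes how many CNTx event counters the core
 * implements; the cycle counter exists in addition to these, hence the +1.
 */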
2097static u32 __init armv7_reset_read_pmnc(void)
2098{
2099 u32 nb_cnt;
2100
2101 /* Initialize & Reset PMNC: C and P bits */
2102 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
2103
 2104	/* Read the number of CNTx counters supported from the PMNC */
2105 nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
2106
2107 /* Add the CPU cycles counter and return */
2108 return nb_cnt + 1;
2109}
2110
2111/*
2112 * ARMv5 [xscale] Performance counter handling code.
2113 *
2114 * Based on xscale OProfile code.
2115 *
2116 * There are two variants of the xscale PMU that we support:
2117 * - xscale1pmu: 2 event counters and a cycle counter
2118 * - xscale2pmu: 4 event counters and a cycle counter
2119 * The two variants share event definitions, but have different
2120 * PMU structures.
2121 */
2122
2123enum xscale_perf_types {
2124 XSCALE_PERFCTR_ICACHE_MISS = 0x00,
2125 XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
2126 XSCALE_PERFCTR_DATA_STALL = 0x02,
2127 XSCALE_PERFCTR_ITLB_MISS = 0x03,
2128 XSCALE_PERFCTR_DTLB_MISS = 0x04,
2129 XSCALE_PERFCTR_BRANCH = 0x05,
2130 XSCALE_PERFCTR_BRANCH_MISS = 0x06,
2131 XSCALE_PERFCTR_INSTRUCTION = 0x07,
2132 XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
2133 XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
2134 XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
2135 XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
2136 XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
2137 XSCALE_PERFCTR_PC_CHANGED = 0x0D,
2138 XSCALE_PERFCTR_BCU_REQUEST = 0x10,
2139 XSCALE_PERFCTR_BCU_FULL = 0x11,
2140 XSCALE_PERFCTR_BCU_DRAIN = 0x12,
2141 XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
2142 XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
2143 XSCALE_PERFCTR_RMW = 0x16,
2144 /* XSCALE_PERFCTR_CCNT is not hardware defined */
2145 XSCALE_PERFCTR_CCNT = 0xFE,
2146 XSCALE_PERFCTR_UNUSED = 0xFF,
2147};
2148
2149enum xscale_counters {
2150 XSCALE_CYCLE_COUNTER = 1,
2151 XSCALE_COUNTER0,
2152 XSCALE_COUNTER1,
2153 XSCALE_COUNTER2,
2154 XSCALE_COUNTER3,
2155};
2156
2157static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
2158 [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
2159 [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
2160 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
2161 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
2162 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
2163 [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
2164 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
2165};
2166
2167static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
2168 [PERF_COUNT_HW_CACHE_OP_MAX]
2169 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2170 [C(L1D)] = {
2171 [C(OP_READ)] = {
2172 [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
2173 [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
2174 },
2175 [C(OP_WRITE)] = {
2176 [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
2177 [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
2178 },
2179 [C(OP_PREFETCH)] = {
2180 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2181 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2182 },
2183 },
2184 [C(L1I)] = {
2185 [C(OP_READ)] = {
2186 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2187 [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
2188 },
2189 [C(OP_WRITE)] = {
2190 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2191 [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
2192 },
2193 [C(OP_PREFETCH)] = {
2194 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2195 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2196 },
2197 },
2198 [C(LL)] = {
2199 [C(OP_READ)] = {
2200 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2201 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2202 },
2203 [C(OP_WRITE)] = {
2204 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2205 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2206 },
2207 [C(OP_PREFETCH)] = {
2208 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2209 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2210 },
2211 },
2212 [C(DTLB)] = {
2213 [C(OP_READ)] = {
2214 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2215 [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
2216 },
2217 [C(OP_WRITE)] = {
2218 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2219 [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
2220 },
2221 [C(OP_PREFETCH)] = {
2222 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2223 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2224 },
2225 },
2226 [C(ITLB)] = {
2227 [C(OP_READ)] = {
2228 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2229 [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
2230 },
2231 [C(OP_WRITE)] = {
2232 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2233 [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
2234 },
2235 [C(OP_PREFETCH)] = {
2236 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2237 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2238 },
2239 },
2240 [C(BPU)] = {
2241 [C(OP_READ)] = {
2242 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2243 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2244 },
2245 [C(OP_WRITE)] = {
2246 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2247 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2248 },
2249 [C(OP_PREFETCH)] = {
2250 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2251 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2252 },
2253 },
2254};
2255
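/*
 * PMNC control bits common to both XScale variants: ENABLE starts the
 * counters, the RESET bits clear the event counters and the cycle counter
 * respectively, and CNT64 selects the divide-by-64 mode of the cycle
 * counter.
 */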
2256#define XSCALE_PMU_ENABLE 0x001
2257#define XSCALE_PMN_RESET 0x002
2258#define XSCALE_CCNT_RESET 0x004
 2259#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
2260#define XSCALE_PMU_CNT64 0x008
2261
2262static inline int
2263xscalepmu_event_map(int config)
2264{
2265 int mapping = xscale_perf_map[config];
2266 if (HW_OP_UNSUPPORTED == mapping)
2267 mapping = -EOPNOTSUPP;
2268 return mapping;
2269}
2270
2271static u64
2272xscalepmu_raw_event(u64 config)
2273{
2274 return config & 0xff;
2275}
2276
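/*
 * On xscale1 all of the PMU state is packed into the single PMNC register:
 * overflow flags, interrupt enables and the two 8-bit event select fields
 * sit at the bit positions encoded by the masks and shifts below.
 */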
2277#define XSCALE1_OVERFLOWED_MASK 0x700
2278#define XSCALE1_CCOUNT_OVERFLOW 0x400
2279#define XSCALE1_COUNT0_OVERFLOW 0x100
2280#define XSCALE1_COUNT1_OVERFLOW 0x200
2281#define XSCALE1_CCOUNT_INT_EN 0x040
2282#define XSCALE1_COUNT0_INT_EN 0x010
2283#define XSCALE1_COUNT1_INT_EN 0x020
2284#define XSCALE1_COUNT0_EVT_SHFT 12
2285#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
2286#define XSCALE1_COUNT1_EVT_SHFT 20
2287#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
2288
2289static inline u32
2290xscale1pmu_read_pmnc(void)
2291{
2292 u32 val;
2293 asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
2294 return val;
2295}
2296
2297static inline void
2298xscale1pmu_write_pmnc(u32 val)
2299{
 2300	/* the upper 4 bits and bits 7 and 11 are write-as-0 */
2301 val &= 0xffff77f;
2302 asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
2303}
2304
2305static inline int
2306xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
2307 enum xscale_counters counter)
2308{
2309 int ret = 0;
2310
2311 switch (counter) {
2312 case XSCALE_CYCLE_COUNTER:
2313 ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
2314 break;
2315 case XSCALE_COUNTER0:
2316 ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
2317 break;
2318 case XSCALE_COUNTER1:
2319 ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
2320 break;
2321 default:
2322 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
2323 }
2324
2325 return ret;
2326}
2327
2328static irqreturn_t
2329xscale1pmu_handle_irq(int irq_num, void *dev)
2330{
2331 unsigned long pmnc;
2332 struct perf_sample_data data;
2333 struct cpu_hw_events *cpuc;
2334 struct pt_regs *regs;
2335 int idx;
2336
2337 /*
 2338	 * NOTE: there is an A-stepping erratum whereby, if an overflow
 2339	 * bit is already set and another overflow occurs, the previously
 2340	 * set overflow bit is cleared. There is no workaround; this is
 2341	 * fixed in the B stepping and later.
2342 */
2343 pmnc = xscale1pmu_read_pmnc();
2344
2345 /*
2346 * Write the value back to clear the overflow flags. Overflow
2347 * flags remain in pmnc for use below. We also disable the PMU
2348 * while we process the interrupt.
2349 */
2350 xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
2351
2352 if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
2353 return IRQ_NONE;
2354
2355 regs = get_irq_regs();
2356
2357 perf_sample_data_init(&data, 0);
2358
2359 cpuc = &__get_cpu_var(cpu_hw_events);
2360 for (idx = 0; idx <= armpmu->num_events; ++idx) {
2361 struct perf_event *event = cpuc->events[idx];
2362 struct hw_perf_event *hwc;
2363
2364 if (!test_bit(idx, cpuc->active_mask))
2365 continue;
2366
2367 if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
2368 continue;
2369
2370 hwc = &event->hw;
2371 armpmu_event_update(event, hwc, idx);
2372 data.period = event->hw.last_period;
2373 if (!armpmu_event_set_period(event, hwc, idx))
2374 continue;
2375
2376 if (perf_event_overflow(event, 0, &data, regs))
2377 armpmu->disable(hwc, idx);
2378 }
2379
2380 perf_event_do_pending();
2381
2382 /*
2383 * Re-enable the PMU.
2384 */
2385 pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
2386 xscale1pmu_write_pmnc(pmnc);
2387
2388 return IRQ_HANDLED;
2389}
2390
2391static void
2392xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
2393{
2394 unsigned long val, mask, evt, flags;
2395
2396 switch (idx) {
2397 case XSCALE_CYCLE_COUNTER:
2398 mask = 0;
2399 evt = XSCALE1_CCOUNT_INT_EN;
2400 break;
2401 case XSCALE_COUNTER0:
2402 mask = XSCALE1_COUNT0_EVT_MASK;
2403 evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
2404 XSCALE1_COUNT0_INT_EN;
2405 break;
2406 case XSCALE_COUNTER1:
2407 mask = XSCALE1_COUNT1_EVT_MASK;
2408 evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
2409 XSCALE1_COUNT1_INT_EN;
2410 break;
2411 default:
2412 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
2413 return;
2414 }
2415
2416 spin_lock_irqsave(&pmu_lock, flags);
2417 val = xscale1pmu_read_pmnc();
2418 val &= ~mask;
2419 val |= evt;
2420 xscale1pmu_write_pmnc(val);
2421 spin_unlock_irqrestore(&pmu_lock, flags);
2422}
2423
2424static void
2425xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
2426{
2427 unsigned long val, mask, evt, flags;
2428
2429 switch (idx) {
2430 case XSCALE_CYCLE_COUNTER:
2431 mask = XSCALE1_CCOUNT_INT_EN;
2432 evt = 0;
2433 break;
2434 case XSCALE_COUNTER0:
2435 mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
2436 evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
2437 break;
2438 case XSCALE_COUNTER1:
2439 mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
2440 evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
2441 break;
2442 default:
2443 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
2444 return;
2445 }
2446
2447 spin_lock_irqsave(&pmu_lock, flags);
2448 val = xscale1pmu_read_pmnc();
2449 val &= ~mask;
2450 val |= evt;
2451 xscale1pmu_write_pmnc(val);
2452 spin_unlock_irqrestore(&pmu_lock, flags);
2453}
2454
2455static int
2456xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
2457 struct hw_perf_event *event)
2458{
2459 if (XSCALE_PERFCTR_CCNT == event->config_base) {
2460 if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
2461 return -EAGAIN;
2462
2463 return XSCALE_CYCLE_COUNTER;
2464 } else {
2465 if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) {
2466 return XSCALE_COUNTER1;
2467 }
2468
2469 if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) {
2470 return XSCALE_COUNTER0;
2471 }
2472
2473 return -EAGAIN;
2474 }
2475}
2476
2477static void
2478xscale1pmu_start(void)
2479{
2480 unsigned long flags, val;
2481
2482 spin_lock_irqsave(&pmu_lock, flags);
2483 val = xscale1pmu_read_pmnc();
2484 val |= XSCALE_PMU_ENABLE;
2485 xscale1pmu_write_pmnc(val);
2486 spin_unlock_irqrestore(&pmu_lock, flags);
2487}
2488
2489static void
2490xscale1pmu_stop(void)
2491{
2492 unsigned long flags, val;
2493
2494 spin_lock_irqsave(&pmu_lock, flags);
2495 val = xscale1pmu_read_pmnc();
2496 val &= ~XSCALE_PMU_ENABLE;
2497 xscale1pmu_write_pmnc(val);
2498 spin_unlock_irqrestore(&pmu_lock, flags);
2499}
2500
2501static inline u32
2502xscale1pmu_read_counter(int counter)
2503{
2504 u32 val = 0;
2505
2506 switch (counter) {
2507 case XSCALE_CYCLE_COUNTER:
2508 asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
2509 break;
2510 case XSCALE_COUNTER0:
2511 asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
2512 break;
2513 case XSCALE_COUNTER1:
2514 asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
2515 break;
2516 }
2517
2518 return val;
2519}
2520
2521static inline void
2522xscale1pmu_write_counter(int counter, u32 val)
2523{
2524 switch (counter) {
2525 case XSCALE_CYCLE_COUNTER:
2526 asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
2527 break;
2528 case XSCALE_COUNTER0:
2529 asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
2530 break;
2531 case XSCALE_COUNTER1:
2532 asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
2533 break;
2534 }
2535}
2536
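/* xscale1: the cycle counter (CCNT) plus two event counters (PMN0, PMN1). */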
2537static const struct arm_pmu xscale1pmu = {
2538 .id = ARM_PERF_PMU_ID_XSCALE1,
2539 .handle_irq = xscale1pmu_handle_irq,
2540 .enable = xscale1pmu_enable_event,
2541 .disable = xscale1pmu_disable_event,
2542 .event_map = xscalepmu_event_map,
2543 .raw_event = xscalepmu_raw_event,
2544 .read_counter = xscale1pmu_read_counter,
2545 .write_counter = xscale1pmu_write_counter,
2546 .get_event_idx = xscale1pmu_get_event_idx,
2547 .start = xscale1pmu_start,
2548 .stop = xscale1pmu_stop,
2549 .num_events = 3,
2550 .max_period = (1LLU << 32) - 1,
2551};
2552
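/*
 * xscale2 spreads the PMU state across separate CP14 registers: the
 * overflow flags and interrupt enables each have their own register with
 * one bit per counter (cycle counter in bit 0), and the four 8-bit event
 * selects are packed into a dedicated event-select register.
 */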
2553#define XSCALE2_OVERFLOWED_MASK 0x01f
2554#define XSCALE2_CCOUNT_OVERFLOW 0x001
2555#define XSCALE2_COUNT0_OVERFLOW 0x002
2556#define XSCALE2_COUNT1_OVERFLOW 0x004
2557#define XSCALE2_COUNT2_OVERFLOW 0x008
2558#define XSCALE2_COUNT3_OVERFLOW 0x010
2559#define XSCALE2_CCOUNT_INT_EN 0x001
2560#define XSCALE2_COUNT0_INT_EN 0x002
2561#define XSCALE2_COUNT1_INT_EN 0x004
2562#define XSCALE2_COUNT2_INT_EN 0x008
2563#define XSCALE2_COUNT3_INT_EN 0x010
2564#define XSCALE2_COUNT0_EVT_SHFT 0
2565#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
2566#define XSCALE2_COUNT1_EVT_SHFT 8
2567#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
2568#define XSCALE2_COUNT2_EVT_SHFT 16
2569#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
2570#define XSCALE2_COUNT3_EVT_SHFT 24
2571#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)
2572
2573static inline u32
2574xscale2pmu_read_pmnc(void)
2575{
2576 u32 val;
2577 asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
2578 /* bits 1-2 and 4-23 are read-unpredictable */
2579 return val & 0xff000009;
2580}
2581
2582static inline void
2583xscale2pmu_write_pmnc(u32 val)
2584{
2585 /* bits 4-23 are write-as-0, 24-31 are write ignored */
2586 val &= 0xf;
2587 asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
2588}
2589
2590static inline u32
2591xscale2pmu_read_overflow_flags(void)
2592{
2593 u32 val;
2594 asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
2595 return val;
2596}
2597
2598static inline void
2599xscale2pmu_write_overflow_flags(u32 val)
2600{
2601 asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
2602}
2603
2604static inline u32
2605xscale2pmu_read_event_select(void)
2606{
2607 u32 val;
2608 asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
2609 return val;
2610}
2611
2612static inline void
2613xscale2pmu_write_event_select(u32 val)
2614{
2615 asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
2616}
2617
2618static inline u32
2619xscale2pmu_read_int_enable(void)
2620{
2621 u32 val;
2622 asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
2623 return val;
2624}
2625
2626static void
2627xscale2pmu_write_int_enable(u32 val)
2628{
2629 asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
2630}
2631
2632static inline int
2633xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
2634 enum xscale_counters counter)
2635{
2636 int ret = 0;
2637
2638 switch (counter) {
2639 case XSCALE_CYCLE_COUNTER:
2640 ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
2641 break;
2642 case XSCALE_COUNTER0:
2643 ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
2644 break;
2645 case XSCALE_COUNTER1:
2646 ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
2647 break;
2648 case XSCALE_COUNTER2:
2649 ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
2650 break;
2651 case XSCALE_COUNTER3:
2652 ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
2653 break;
2654 default:
2655 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
2656 }
2657
2658 return ret;
2659}
2660
2661static irqreturn_t
2662xscale2pmu_handle_irq(int irq_num, void *dev)
2663{
2664 unsigned long pmnc, of_flags;
2665 struct perf_sample_data data;
2666 struct cpu_hw_events *cpuc;
2667 struct pt_regs *regs;
2668 int idx;
2669
2670 /* Disable the PMU. */
2671 pmnc = xscale2pmu_read_pmnc();
2672 xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
2673
2674 /* Check the overflow flag register. */
2675 of_flags = xscale2pmu_read_overflow_flags();
2676 if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
2677 return IRQ_NONE;
2678
2679 /* Clear the overflow bits. */
2680 xscale2pmu_write_overflow_flags(of_flags);
2681
2682 regs = get_irq_regs();
2683
2684 perf_sample_data_init(&data, 0);
2685
2686 cpuc = &__get_cpu_var(cpu_hw_events);
2687 for (idx = 0; idx <= armpmu->num_events; ++idx) {
2688 struct perf_event *event = cpuc->events[idx];
2689 struct hw_perf_event *hwc;
2690
2691 if (!test_bit(idx, cpuc->active_mask))
2692 continue;
2693
 2694		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
2695 continue;
2696
2697 hwc = &event->hw;
2698 armpmu_event_update(event, hwc, idx);
2699 data.period = event->hw.last_period;
2700 if (!armpmu_event_set_period(event, hwc, idx))
2701 continue;
2702
2703 if (perf_event_overflow(event, 0, &data, regs))
2704 armpmu->disable(hwc, idx);
2705 }
2706
2707 perf_event_do_pending();
2708
2709 /*
2710 * Re-enable the PMU.
2711 */
2712 pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
2713 xscale2pmu_write_pmnc(pmnc);
2714
2715 return IRQ_HANDLED;
2716}
2717
2718static void
2719xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
2720{
2721 unsigned long flags, ien, evtsel;
2722
2723 ien = xscale2pmu_read_int_enable();
2724 evtsel = xscale2pmu_read_event_select();
2725
2726 switch (idx) {
2727 case XSCALE_CYCLE_COUNTER:
2728 ien |= XSCALE2_CCOUNT_INT_EN;
2729 break;
2730 case XSCALE_COUNTER0:
2731 ien |= XSCALE2_COUNT0_INT_EN;
2732 evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
2733 evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
2734 break;
2735 case XSCALE_COUNTER1:
2736 ien |= XSCALE2_COUNT1_INT_EN;
2737 evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
2738 evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
2739 break;
2740 case XSCALE_COUNTER2:
2741 ien |= XSCALE2_COUNT2_INT_EN;
2742 evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
2743 evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
2744 break;
2745 case XSCALE_COUNTER3:
2746 ien |= XSCALE2_COUNT3_INT_EN;
2747 evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
2748 evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
2749 break;
2750 default:
2751 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
2752 return;
2753 }
2754
2755 spin_lock_irqsave(&pmu_lock, flags);
2756 xscale2pmu_write_event_select(evtsel);
2757 xscale2pmu_write_int_enable(ien);
2758 spin_unlock_irqrestore(&pmu_lock, flags);
2759}
2760
2761static void
2762xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
2763{
2764 unsigned long flags, ien, evtsel;
2765
2766 ien = xscale2pmu_read_int_enable();
2767 evtsel = xscale2pmu_read_event_select();
2768
2769 switch (idx) {
2770 case XSCALE_CYCLE_COUNTER:
2771 ien &= ~XSCALE2_CCOUNT_INT_EN;
2772 break;
2773 case XSCALE_COUNTER0:
2774 ien &= ~XSCALE2_COUNT0_INT_EN;
2775 evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
2776 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
2777 break;
2778 case XSCALE_COUNTER1:
2779 ien &= ~XSCALE2_COUNT1_INT_EN;
2780 evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
2781 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
2782 break;
2783 case XSCALE_COUNTER2:
2784 ien &= ~XSCALE2_COUNT2_INT_EN;
2785 evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
2786 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
2787 break;
2788 case XSCALE_COUNTER3:
2789 ien &= ~XSCALE2_COUNT3_INT_EN;
2790 evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
2791 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
2792 break;
2793 default:
2794 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
2795 return;
2796 }
2797
2798 spin_lock_irqsave(&pmu_lock, flags);
2799 xscale2pmu_write_event_select(evtsel);
2800 xscale2pmu_write_int_enable(ien);
2801 spin_unlock_irqrestore(&pmu_lock, flags);
2802}
2803
2804static int
2805xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
2806 struct hw_perf_event *event)
2807{
2808 int idx = xscale1pmu_get_event_idx(cpuc, event);
2809 if (idx >= 0)
2810 goto out;
2811
2812 if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
2813 idx = XSCALE_COUNTER3;
2814 else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
2815 idx = XSCALE_COUNTER2;
2816out:
2817 return idx;
2818}
2819
2820static void
2821xscale2pmu_start(void)
2822{
2823 unsigned long flags, val;
2824
2825 spin_lock_irqsave(&pmu_lock, flags);
2826 val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
2827 val |= XSCALE_PMU_ENABLE;
2828 xscale2pmu_write_pmnc(val);
2829 spin_unlock_irqrestore(&pmu_lock, flags);
2830}
2831
2832static void
2833xscale2pmu_stop(void)
2834{
2835 unsigned long flags, val;
2836
2837 spin_lock_irqsave(&pmu_lock, flags);
2838 val = xscale2pmu_read_pmnc();
2839 val &= ~XSCALE_PMU_ENABLE;
2840 xscale2pmu_write_pmnc(val);
2841 spin_unlock_irqrestore(&pmu_lock, flags);
2842}
2843
2844static inline u32
2845xscale2pmu_read_counter(int counter)
2846{
2847 u32 val = 0;
2848
2849 switch (counter) {
2850 case XSCALE_CYCLE_COUNTER:
2851 asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
2852 break;
2853 case XSCALE_COUNTER0:
2854 asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
2855 break;
2856 case XSCALE_COUNTER1:
2857 asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
2858 break;
2859 case XSCALE_COUNTER2:
2860 asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
2861 break;
2862 case XSCALE_COUNTER3:
2863 asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
2864 break;
2865 }
2866
2867 return val;
2868}
2869
2870static inline void
2871xscale2pmu_write_counter(int counter, u32 val)
2872{
2873 switch (counter) {
2874 case XSCALE_CYCLE_COUNTER:
2875 asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
2876 break;
2877 case XSCALE_COUNTER0:
2878 asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
2879 break;
2880 case XSCALE_COUNTER1:
2881 asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
2882 break;
2883 case XSCALE_COUNTER2:
2884 asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
2885 break;
2886 case XSCALE_COUNTER3:
2887 asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
2888 break;
2889 }
2890}
2891
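/* xscale2: the cycle counter (CCNT) plus four event counters (PMN0-PMN3). */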
2892static const struct arm_pmu xscale2pmu = {
2893 .id = ARM_PERF_PMU_ID_XSCALE2,
2894 .handle_irq = xscale2pmu_handle_irq,
2895 .enable = xscale2pmu_enable_event,
2896 .disable = xscale2pmu_disable_event,
2897 .event_map = xscalepmu_event_map,
2898 .raw_event = xscalepmu_raw_event,
2899 .read_counter = xscale2pmu_read_counter,
2900 .write_counter = xscale2pmu_write_counter,
2901 .get_event_idx = xscale2pmu_get_event_idx,
2902 .start = xscale2pmu_start,
2903 .stop = xscale2pmu_stop,
2904 .num_events = 5,
2905 .max_period = (1LLU << 32) - 1,
2906};
2907
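/*
 * Probe the CPU at boot: the implementor and part number fields of the
 * CPUID select which arm_pmu instance the generic code will drive.
 */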
2908static int __init
2909init_hw_perf_events(void)
2910{
2911 unsigned long cpuid = read_cpuid_id();
2912 unsigned long implementor = (cpuid & 0xFF000000) >> 24;
2913 unsigned long part_number = (cpuid & 0xFFF0);
2914
49e6a32f 2915 /* ARM Ltd CPUs. */
2916 if (0x41 == implementor) {
2917 switch (part_number) {
2918 case 0xB360: /* ARM1136 */
2919 case 0xB560: /* ARM1156 */
2920 case 0xB760: /* ARM1176 */
2921 armpmu = &armv6pmu;
2922 memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
2923 sizeof(armv6_perf_cache_map));
2924 perf_max_events = armv6pmu.num_events;
2925 break;
2926 case 0xB020: /* ARM11mpcore */
2927 armpmu = &armv6mpcore_pmu;
2928 memcpy(armpmu_perf_cache_map,
2929 armv6mpcore_perf_cache_map,
2930 sizeof(armv6mpcore_perf_cache_map));
2931 perf_max_events = armv6mpcore_pmu.num_events;
2932 break;
796d1295 2933 case 0xC080: /* Cortex-A8 */
181193f3 2934 armv7pmu.id = ARM_PERF_PMU_ID_CA8;
2935 memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
2936 sizeof(armv7_a8_perf_cache_map));
2937 armv7pmu.event_map = armv7_a8_pmu_event_map;
2938 armpmu = &armv7pmu;
2939
 2940			/* Reset PMNC and read the number of CNTx
 2941			   counters supported */
2942 armv7pmu.num_events = armv7_reset_read_pmnc();
2943 perf_max_events = armv7pmu.num_events;
2944 break;
2945 case 0xC090: /* Cortex-A9 */
181193f3 2946 armv7pmu.id = ARM_PERF_PMU_ID_CA9;
2947 memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
2948 sizeof(armv7_a9_perf_cache_map));
2949 armv7pmu.event_map = armv7_a9_pmu_event_map;
2950 armpmu = &armv7pmu;
2951
 2952			/* Reset PMNC and read the number of CNTx
 2953			   counters supported */
2954 armv7pmu.num_events = armv7_reset_read_pmnc();
2955 perf_max_events = armv7pmu.num_events;
2956 break;
2957 }
2958 /* Intel CPUs [xscale]. */
2959 } else if (0x69 == implementor) {
2960 part_number = (cpuid >> 13) & 0x7;
2961 switch (part_number) {
2962 case 1:
2963 armpmu = &xscale1pmu;
2964 memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
2965 sizeof(xscale_perf_cache_map));
2966 perf_max_events = xscale1pmu.num_events;
2967 break;
2968 case 2:
2969 armpmu = &xscale2pmu;
2970 memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
2971 sizeof(xscale_perf_cache_map));
2972 perf_max_events = xscale2pmu.num_events;
2973 break;
2974 }
2975 }
2976
49e6a32f 2977 if (armpmu) {
796d1295 2978 pr_info("enabled with %s PMU driver, %d counters available\n",
2979 arm_pmu_names[armpmu->id], armpmu->num_events);
2980 } else {
2981 pr_info("no hardware support available\n");
2982 perf_max_events = -1;
2983 }
2984
2985 return 0;
2986}
2987arch_initcall(init_hw_perf_events);
2988
2989/*
2990 * Callchain handling code.
2991 */
2992static inline void
2993callchain_store(struct perf_callchain_entry *entry,
2994 u64 ip)
2995{
2996 if (entry->nr < PERF_MAX_STACK_DEPTH)
2997 entry->ip[entry->nr++] = ip;
2998}
2999
3000/*
3001 * The registers we're interested in are at the end of the variable
3002 * length saved register structure. The fp points at the end of this
3003 * structure so the address of this struct is:
3004 * (struct frame_tail *)(xxx->fp)-1
3005 *
3006 * This code has been adapted from the ARM OProfile support.
3007 */
3008struct frame_tail {
3009 struct frame_tail *fp;
3010 unsigned long sp;
3011 unsigned long lr;
3012} __attribute__((packed));
3013
3014/*
3015 * Get the return address for a single stackframe and return a pointer to the
3016 * next frame tail.
3017 */
3018static struct frame_tail *
3019user_backtrace(struct frame_tail *tail,
3020 struct perf_callchain_entry *entry)
3021{
3022 struct frame_tail buftail;
3023
3024 /* Also check accessibility of one struct frame_tail beyond */
3025 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
3026 return NULL;
3027 if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
3028 return NULL;
3029
3030 callchain_store(entry, buftail.lr);
3031
3032 /*
3033 * Frame pointers should strictly progress back up the stack
3034 * (towards higher addresses).
3035 */
3036 if (tail >= buftail.fp)
3037 return NULL;
3038
3039 return buftail.fp - 1;
3040}
3041
3042static void
3043perf_callchain_user(struct pt_regs *regs,
3044 struct perf_callchain_entry *entry)
3045{
3046 struct frame_tail *tail;
3047
3048 callchain_store(entry, PERF_CONTEXT_USER);
3049
3050 if (!user_mode(regs))
3051 regs = task_pt_regs(current);
3052
3053 tail = (struct frame_tail *)regs->ARM_fp - 1;
3054
3055 while (tail && !((unsigned long)tail & 0x3))
3056 tail = user_backtrace(tail, entry);
3057}
3058
3059/*
3060 * Gets called by walk_stackframe() for every stackframe. This will be called
 3061 * whilst unwinding the stackframe and is like a subroutine return, so we use
3062 * the PC.
3063 */
3064static int
3065callchain_trace(struct stackframe *fr,
3066 void *data)
3067{
3068 struct perf_callchain_entry *entry = data;
3069 callchain_store(entry, fr->pc);
3070 return 0;
3071}
3072
3073static void
3074perf_callchain_kernel(struct pt_regs *regs,
3075 struct perf_callchain_entry *entry)
3076{
3077 struct stackframe fr;
3078
3079 callchain_store(entry, PERF_CONTEXT_KERNEL);
3080 fr.fp = regs->ARM_fp;
3081 fr.sp = regs->ARM_sp;
3082 fr.lr = regs->ARM_lr;
3083 fr.pc = regs->ARM_pc;
3084 walk_stackframe(&fr, callchain_trace, entry);
3085}
3086
3087static void
3088perf_do_callchain(struct pt_regs *regs,
3089 struct perf_callchain_entry *entry)
3090{
3091 int is_user;
3092
3093 if (!regs)
3094 return;
3095
3096 is_user = user_mode(regs);
3097
3098 if (!current || !current->pid)
3099 return;
3100
3101 if (is_user && current->state != TASK_RUNNING)
3102 return;
3103
3104 if (!is_user)
3105 perf_callchain_kernel(regs, entry);
3106
3107 if (current->mm)
3108 perf_callchain_user(regs, entry);
3109}
3110
3111static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
3112
3113struct perf_callchain_entry *
3114perf_callchain(struct pt_regs *regs)
3115{
3116 struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
3117
3118 entry->nr = 0;
3119 perf_do_callchain(regs, entry);
3120 return entry;
3121}