ARM: perf: encode PMU name in arm_pmu structure
deliverable/linux.git: arch/arm/kernel/perf_event.c
1 #undef DEBUG
2
3 /*
4 * ARM performance counter support.
5 *
6 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
7 *
8 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
9 * 2010 (c) MontaVista Software, LLC.
10 *
11 * This code is based on the sparc64 perf event code, which is in turn based
12 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
13 * code.
14 */
15 #define pr_fmt(fmt) "hw perfevents: " fmt
16
17 #include <linux/interrupt.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/perf_event.h>
21 #include <linux/platform_device.h>
22 #include <linux/spinlock.h>
23 #include <linux/uaccess.h>
24
25 #include <asm/cputype.h>
26 #include <asm/irq.h>
27 #include <asm/irq_regs.h>
28 #include <asm/pmu.h>
29 #include <asm/stacktrace.h>
30
31 static struct platform_device *pmu_device;
32
33 /*
34 * Hardware lock to serialize accesses to PMU registers. Needed for the
35 * read/modify/write sequences.
36 */
37 DEFINE_SPINLOCK(pmu_lock);
38
39 /*
40 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
41 * another platform that supports more, we need to increase this to be the
42 * largest of all platforms.
43 *
44 * ARMv7 supports up to 32 events:
 45  * the cycle counter CCNT plus 31 event counters CNT0..CNT30.
46 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
47 */
48 #define ARMPMU_MAX_HWEVENTS 33
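/*
 * Worked count (editor's note, not part of the original file): index 0 is
 * reserved, the cycle counter takes one slot, and ARMv7 allows up to 31
 * event counters, giving 1 + 1 + 31 == 33 array slots.
 */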
49
50 /* The events for a given CPU. */
51 struct cpu_hw_events {
52 /*
53 * The events that are active on the CPU for the given index. Index 0
54 * is reserved.
55 */
56 struct perf_event *events[ARMPMU_MAX_HWEVENTS];
57
58 /*
59 * A 1 bit for an index indicates that the counter is being used for
60 * an event. A 0 means that the counter can be used.
61 */
62 unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
63
64 /*
65 * A 1 bit for an index indicates that the counter is actively being
66 * used.
67 */
68 unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
69 };
70 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
71
72 struct arm_pmu {
73 enum arm_perf_pmu_ids id;
74 const char *name;
75 irqreturn_t (*handle_irq)(int irq_num, void *dev);
76 void (*enable)(struct hw_perf_event *evt, int idx);
77 void (*disable)(struct hw_perf_event *evt, int idx);
78 int (*get_event_idx)(struct cpu_hw_events *cpuc,
79 struct hw_perf_event *hwc);
80 u32 (*read_counter)(int idx);
81 void (*write_counter)(int idx, u32 val);
82 void (*start)(void);
83 void (*stop)(void);
84 const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
85 [PERF_COUNT_HW_CACHE_OP_MAX]
86 [PERF_COUNT_HW_CACHE_RESULT_MAX];
87 const unsigned (*event_map)[PERF_COUNT_HW_MAX];
88 u32 raw_event_mask;
89 int num_events;
90 u64 max_period;
91 };
92
93 /* Set at runtime when we know what CPU type we are. */
94 static const struct arm_pmu *armpmu;
95
96 enum arm_perf_pmu_ids
97 armpmu_get_pmu_id(void)
98 {
99 int id = -ENODEV;
100
101 if (armpmu != NULL)
102 id = armpmu->id;
103
104 return id;
105 }
106 EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
107
108 int
109 armpmu_get_max_events(void)
110 {
111 int max_events = 0;
112
113 if (armpmu != NULL)
114 max_events = armpmu->num_events;
115
116 return max_events;
117 }
118 EXPORT_SYMBOL_GPL(armpmu_get_max_events);
119
120 int perf_num_counters(void)
121 {
122 return armpmu_get_max_events();
123 }
124 EXPORT_SYMBOL_GPL(perf_num_counters);
125
126 #define HW_OP_UNSUPPORTED 0xFFFF
127
128 #define C(_x) \
129 PERF_COUNT_HW_CACHE_##_x
130
131 #define CACHE_OP_UNSUPPORTED 0xFFFF
132
133 static int
134 armpmu_map_cache_event(u64 config)
135 {
136 unsigned int cache_type, cache_op, cache_result, ret;
137
138 cache_type = (config >> 0) & 0xff;
139 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
140 return -EINVAL;
141
142 cache_op = (config >> 8) & 0xff;
143 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
144 return -EINVAL;
145
146 cache_result = (config >> 16) & 0xff;
147 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
148 return -EINVAL;
149
150 ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
151
152 if (ret == CACHE_OP_UNSUPPORTED)
153 return -ENOENT;
154
155 return ret;
156 }
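/*
 * Illustrative sketch (editor's note, not part of the original file): the
 * config value decoded above follows the generic perf cache-event packing,
 * config = type | (op << 8) | (result << 16).  The hypothetical helper
 * below, guarded out so it is never built, shows the value userspace would
 * pass to request L1D read misses; on ARMv6 the table lookup above turns
 * this into ARMV6_PERFCTR_DCACHE_MISS.
 */
#if 0
static u64 example_l1d_read_miss_config(void)
{
	return PERF_COUNT_HW_CACHE_L1D |			/* 0x00 */
	       (PERF_COUNT_HW_CACHE_OP_READ << 8) |		/* 0x00 << 8 */
	       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);		/* 0x01 << 16 => 0x10000 */
}
#endif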
157
158 static int
159 armpmu_map_event(u64 config)
160 {
161 int mapping = (*armpmu->event_map)[config];
162 return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
163 }
164
165 static int
166 armpmu_map_raw_event(u64 config)
167 {
168 return (int)(config & armpmu->raw_event_mask);
169 }
170
171 static int
172 armpmu_event_set_period(struct perf_event *event,
173 struct hw_perf_event *hwc,
174 int idx)
175 {
176 s64 left = local64_read(&hwc->period_left);
177 s64 period = hwc->sample_period;
178 int ret = 0;
179
180 if (unlikely(left <= -period)) {
181 left = period;
182 local64_set(&hwc->period_left, left);
183 hwc->last_period = period;
184 ret = 1;
185 }
186
187 if (unlikely(left <= 0)) {
188 left += period;
189 local64_set(&hwc->period_left, left);
190 hwc->last_period = period;
191 ret = 1;
192 }
193
194 if (left > (s64)armpmu->max_period)
195 left = armpmu->max_period;
196
197 local64_set(&hwc->prev_count, (u64)-left);
198
199 armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
200
201 perf_event_update_userpage(event);
202
203 return ret;
204 }
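/*
 * Worked example (editor's note, not part of the original file): with a
 * sample_period of 1000, the code above programs the counter to
 * (u64)-1000 & 0xffffffff == 0xfffffc18.  The 32-bit counter then wraps,
 * and raises its overflow interrupt, after exactly 1000 events, which is
 * what makes the "count up from -left" trick work.
 */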
205
206 static u64
207 armpmu_event_update(struct perf_event *event,
208 struct hw_perf_event *hwc,
209 int idx)
210 {
211 int shift = 64 - 32;
212 s64 prev_raw_count, new_raw_count;
213 u64 delta;
214
215 again:
216 prev_raw_count = local64_read(&hwc->prev_count);
217 new_raw_count = armpmu->read_counter(idx);
218
219 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
220 new_raw_count) != prev_raw_count)
221 goto again;
222
223 delta = (new_raw_count << shift) - (prev_raw_count << shift);
224 delta >>= shift;
225
226 local64_add(delta, &event->count);
227 local64_sub(delta, &hwc->period_left);
228
229 return new_raw_count;
230 }
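/*
 * Worked example (editor's note, not part of the original file): doing the
 * subtraction in the top 32 bits makes the delta wrap-safe for the 32-bit
 * counters.  If prev_raw_count == 0xfffffff0 and new_raw_count == 0x10
 * (i.e. the counter wrapped), then
 *
 *	delta = ((0x10 << 32) - (0xfffffff0 << 32)) >> 32 == 0x20,
 *
 * i.e. 32 events, rather than a huge bogus value.
 */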
231
232 static void
233 armpmu_read(struct perf_event *event)
234 {
235 struct hw_perf_event *hwc = &event->hw;
236
237 /* Don't read disabled counters! */
238 if (hwc->idx < 0)
239 return;
240
241 armpmu_event_update(event, hwc, hwc->idx);
242 }
243
244 static void
245 armpmu_stop(struct perf_event *event, int flags)
246 {
247 struct hw_perf_event *hwc = &event->hw;
248
249 if (!armpmu)
250 return;
251
252 /*
253 * ARM pmu always has to update the counter, so ignore
254 * PERF_EF_UPDATE, see comments in armpmu_start().
255 */
256 if (!(hwc->state & PERF_HES_STOPPED)) {
257 armpmu->disable(hwc, hwc->idx);
258 barrier(); /* why? */
259 armpmu_event_update(event, hwc, hwc->idx);
260 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
261 }
262 }
263
264 static void
265 armpmu_start(struct perf_event *event, int flags)
266 {
267 struct hw_perf_event *hwc = &event->hw;
268
269 if (!armpmu)
270 return;
271
272 /*
273 * ARM pmu always has to reprogram the period, so ignore
274 * PERF_EF_RELOAD, see the comment below.
275 */
276 if (flags & PERF_EF_RELOAD)
277 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
278
279 hwc->state = 0;
280 /*
281 * Set the period again. Some counters can't be stopped, so when we
282 * were stopped we simply disabled the IRQ source and the counter
283 * may have been left counting. If we don't do this step then we may
284 * get an interrupt too soon or *way* too late if the overflow has
285 * happened since disabling.
286 */
287 armpmu_event_set_period(event, hwc, hwc->idx);
288 armpmu->enable(hwc, hwc->idx);
289 }
290
291 static void
292 armpmu_del(struct perf_event *event, int flags)
293 {
294 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
295 struct hw_perf_event *hwc = &event->hw;
296 int idx = hwc->idx;
297
298 WARN_ON(idx < 0);
299
300 clear_bit(idx, cpuc->active_mask);
301 armpmu_stop(event, PERF_EF_UPDATE);
302 cpuc->events[idx] = NULL;
303 clear_bit(idx, cpuc->used_mask);
304
305 perf_event_update_userpage(event);
306 }
307
308 static int
309 armpmu_add(struct perf_event *event, int flags)
310 {
311 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
312 struct hw_perf_event *hwc = &event->hw;
313 int idx;
314 int err = 0;
315
316 perf_pmu_disable(event->pmu);
317
 318         /* If we don't have space for the event then finish early. */
319 idx = armpmu->get_event_idx(cpuc, hwc);
320 if (idx < 0) {
321 err = idx;
322 goto out;
323 }
324
325 /*
326 * If there is an event in the counter we are going to use then make
327 * sure it is disabled.
328 */
329 event->hw.idx = idx;
330 armpmu->disable(hwc, idx);
331 cpuc->events[idx] = event;
332 set_bit(idx, cpuc->active_mask);
333
334 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
335 if (flags & PERF_EF_START)
336 armpmu_start(event, PERF_EF_RELOAD);
337
338 /* Propagate our changes to the userspace mapping. */
339 perf_event_update_userpage(event);
340
341 out:
342 perf_pmu_enable(event->pmu);
343 return err;
344 }
345
346 static struct pmu pmu;
347
348 static int
349 validate_event(struct cpu_hw_events *cpuc,
350 struct perf_event *event)
351 {
352 struct hw_perf_event fake_event = event->hw;
353
354 if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
355 return 1;
356
357 return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
358 }
359
360 static int
361 validate_group(struct perf_event *event)
362 {
363 struct perf_event *sibling, *leader = event->group_leader;
364 struct cpu_hw_events fake_pmu;
365
366 memset(&fake_pmu, 0, sizeof(fake_pmu));
367
368 if (!validate_event(&fake_pmu, leader))
369 return -ENOSPC;
370
371 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
372 if (!validate_event(&fake_pmu, sibling))
373 return -ENOSPC;
374 }
375
376 if (!validate_event(&fake_pmu, event))
377 return -ENOSPC;
378
379 return 0;
380 }
381
382 static int
383 armpmu_reserve_hardware(void)
384 {
385 int i, err = -ENODEV, irq;
386
387 pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
388 if (IS_ERR(pmu_device)) {
389 pr_warning("unable to reserve pmu\n");
390 return PTR_ERR(pmu_device);
391 }
392
393 init_pmu(ARM_PMU_DEVICE_CPU);
394
395 if (pmu_device->num_resources < 1) {
396 pr_err("no irqs for PMUs defined\n");
397 return -ENODEV;
398 }
399
400 for (i = 0; i < pmu_device->num_resources; ++i) {
401 irq = platform_get_irq(pmu_device, i);
402 if (irq < 0)
403 continue;
404
405 err = request_irq(irq, armpmu->handle_irq,
406 IRQF_DISABLED | IRQF_NOBALANCING,
407 "armpmu", NULL);
408 if (err) {
409 pr_warning("unable to request IRQ%d for ARM perf "
410 "counters\n", irq);
411 break;
412 }
413 }
414
415 if (err) {
416 for (i = i - 1; i >= 0; --i) {
417 irq = platform_get_irq(pmu_device, i);
418 if (irq >= 0)
419 free_irq(irq, NULL);
420 }
421 release_pmu(pmu_device);
422 pmu_device = NULL;
423 }
424
425 return err;
426 }
427
428 static void
429 armpmu_release_hardware(void)
430 {
431 int i, irq;
432
433 for (i = pmu_device->num_resources - 1; i >= 0; --i) {
434 irq = platform_get_irq(pmu_device, i);
435 if (irq >= 0)
436 free_irq(irq, NULL);
437 }
438 armpmu->stop();
439
440 release_pmu(pmu_device);
441 pmu_device = NULL;
442 }
443
444 static atomic_t active_events = ATOMIC_INIT(0);
445 static DEFINE_MUTEX(pmu_reserve_mutex);
446
447 static void
448 hw_perf_event_destroy(struct perf_event *event)
449 {
450 if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
451 armpmu_release_hardware();
452 mutex_unlock(&pmu_reserve_mutex);
453 }
454 }
455
456 static int
457 __hw_perf_event_init(struct perf_event *event)
458 {
459 struct hw_perf_event *hwc = &event->hw;
460 int mapping, err;
461
462 /* Decode the generic type into an ARM event identifier. */
463 if (PERF_TYPE_HARDWARE == event->attr.type) {
464 mapping = armpmu_map_event(event->attr.config);
465 } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
466 mapping = armpmu_map_cache_event(event->attr.config);
467 } else if (PERF_TYPE_RAW == event->attr.type) {
468 mapping = armpmu_map_raw_event(event->attr.config);
469 } else {
470 pr_debug("event type %x not supported\n", event->attr.type);
471 return -EOPNOTSUPP;
472 }
473
474 if (mapping < 0) {
475 pr_debug("event %x:%llx not supported\n", event->attr.type,
476 event->attr.config);
477 return mapping;
478 }
479
480 /*
481 * Check whether we need to exclude the counter from certain modes.
482 * The ARM performance counters are on all of the time so if someone
483 * has asked us for some excludes then we have to fail.
484 */
485 if (event->attr.exclude_kernel || event->attr.exclude_user ||
486 event->attr.exclude_hv || event->attr.exclude_idle) {
487 pr_debug("ARM performance counters do not support "
488 "mode exclusion\n");
489 return -EPERM;
490 }
491
492 /*
493 * We don't assign an index until we actually place the event onto
494 * hardware. Use -1 to signify that we haven't decided where to put it
 495          * yet. For SMP systems, each core has its own PMU so we can't do any
496 * clever allocation or constraints checking at this point.
497 */
498 hwc->idx = -1;
499
500 /*
501 * Store the event encoding into the config_base field. config and
502 * event_base are unused as the only 2 things we need to know are
503 * the event mapping and the counter to use. The counter to use is
504 * also the indx and the config_base is the event type.
505 */
506 hwc->config_base = (unsigned long)mapping;
507 hwc->config = 0;
508 hwc->event_base = 0;
509
510 if (!hwc->sample_period) {
511 hwc->sample_period = armpmu->max_period;
512 hwc->last_period = hwc->sample_period;
513 local64_set(&hwc->period_left, hwc->sample_period);
514 }
515
516 err = 0;
517 if (event->group_leader != event) {
518 err = validate_group(event);
519 if (err)
520 return -EINVAL;
521 }
522
523 return err;
524 }
525
526 static int armpmu_event_init(struct perf_event *event)
527 {
528 int err = 0;
529
530 switch (event->attr.type) {
531 case PERF_TYPE_RAW:
532 case PERF_TYPE_HARDWARE:
533 case PERF_TYPE_HW_CACHE:
534 break;
535
536 default:
537 return -ENOENT;
538 }
539
540 if (!armpmu)
541 return -ENODEV;
542
543 event->destroy = hw_perf_event_destroy;
544
545 if (!atomic_inc_not_zero(&active_events)) {
546 if (atomic_read(&active_events) > armpmu->num_events) {
547 atomic_dec(&active_events);
548 return -ENOSPC;
549 }
550
551 mutex_lock(&pmu_reserve_mutex);
552 if (atomic_read(&active_events) == 0) {
553 err = armpmu_reserve_hardware();
554 }
555
556 if (!err)
557 atomic_inc(&active_events);
558 mutex_unlock(&pmu_reserve_mutex);
559 }
560
561 if (err)
562 return err;
563
564 err = __hw_perf_event_init(event);
565 if (err)
566 hw_perf_event_destroy(event);
567
568 return err;
569 }
570
571 static void armpmu_enable(struct pmu *pmu)
572 {
573 /* Enable all of the perf events on hardware. */
574 int idx;
575 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
576
577 if (!armpmu)
578 return;
579
580 for (idx = 0; idx <= armpmu->num_events; ++idx) {
581 struct perf_event *event = cpuc->events[idx];
582
583 if (!event)
584 continue;
585
586 armpmu->enable(&event->hw, idx);
587 }
588
589 armpmu->start();
590 }
591
592 static void armpmu_disable(struct pmu *pmu)
593 {
594 if (armpmu)
595 armpmu->stop();
596 }
597
598 static struct pmu pmu = {
599 .pmu_enable = armpmu_enable,
600 .pmu_disable = armpmu_disable,
601 .event_init = armpmu_event_init,
602 .add = armpmu_add,
603 .del = armpmu_del,
604 .start = armpmu_start,
605 .stop = armpmu_stop,
606 .read = armpmu_read,
607 };
608
609 /*
610 * ARMv6 Performance counter handling code.
611 *
612 * ARMv6 has 2 configurable performance counters and a single cycle counter.
613 * They all share a single reset bit but can be written to zero so we can use
614 * that for a reset.
615 *
616 * The counters can't be individually enabled or disabled so when we remove
617 * one event and replace it with another we could get spurious counts from the
618 * wrong event. However, we can take advantage of the fact that the
619 * performance counters can export events to the event bus, and the event bus
620 * itself can be monitored. This requires that we *don't* export the events to
621 * the event bus. The procedure for disabling a configurable counter is:
622 * - change the counter to count the ETMEXTOUT[0] signal (0x20). This
623 * effectively stops the counter from counting.
 624  *   - disable the counter's interrupt generation (each counter has its
625 * own interrupt enable bit).
626 * Once stopped, the counter value can be written as 0 to reset.
627 *
628 * To enable a counter:
629 * - enable the counter's interrupt generation.
630 * - set the new event type.
631 *
632 * Note: the dedicated cycle counter only counts cycles and can't be
633 * enabled/disabled independently of the others. When we want to disable the
634 * cycle counter, we have to just disable the interrupt reporting and start
635 * ignoring that counter. When re-enabling, we have to reset the value and
636 * enable the interrupt.
637 */
638
639 enum armv6_perf_types {
640 ARMV6_PERFCTR_ICACHE_MISS = 0x0,
641 ARMV6_PERFCTR_IBUF_STALL = 0x1,
642 ARMV6_PERFCTR_DDEP_STALL = 0x2,
643 ARMV6_PERFCTR_ITLB_MISS = 0x3,
644 ARMV6_PERFCTR_DTLB_MISS = 0x4,
645 ARMV6_PERFCTR_BR_EXEC = 0x5,
646 ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
647 ARMV6_PERFCTR_INSTR_EXEC = 0x7,
648 ARMV6_PERFCTR_DCACHE_HIT = 0x9,
649 ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
650 ARMV6_PERFCTR_DCACHE_MISS = 0xB,
651 ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
652 ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
653 ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
654 ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
655 ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
656 ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
657 ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
658 ARMV6_PERFCTR_NOP = 0x20,
659 };
660
661 enum armv6_counters {
662 ARMV6_CYCLE_COUNTER = 1,
663 ARMV6_COUNTER0,
664 ARMV6_COUNTER1,
665 };
666
667 /*
668 * The hardware events that we support. We do support cache operations but
 669  * we have Harvard caches and no way to combine instruction and data
670 * accesses/misses in hardware.
671 */
672 static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
673 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
674 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
675 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
676 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
677 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
678 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
679 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
680 };
681
682 static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
683 [PERF_COUNT_HW_CACHE_OP_MAX]
684 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
685 [C(L1D)] = {
686 /*
687 * The performance counters don't differentiate between read
688 * and write accesses/misses so this isn't strictly correct,
689 * but it's the best we can do. Writes and reads get
690 * combined.
691 */
692 [C(OP_READ)] = {
693 [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
694 [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
695 },
696 [C(OP_WRITE)] = {
697 [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
698 [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
699 },
700 [C(OP_PREFETCH)] = {
701 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
702 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
703 },
704 },
705 [C(L1I)] = {
706 [C(OP_READ)] = {
707 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
708 [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
709 },
710 [C(OP_WRITE)] = {
711 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
712 [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
713 },
714 [C(OP_PREFETCH)] = {
715 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
716 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
717 },
718 },
719 [C(LL)] = {
720 [C(OP_READ)] = {
721 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
722 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
723 },
724 [C(OP_WRITE)] = {
725 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
726 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
727 },
728 [C(OP_PREFETCH)] = {
729 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
730 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
731 },
732 },
733 [C(DTLB)] = {
734 /*
735 * The ARM performance counters can count micro DTLB misses,
736 * micro ITLB misses and main TLB misses. There isn't an event
737 * for TLB misses, so use the micro misses here and if users
738 * want the main TLB misses they can use a raw counter.
739 */
740 [C(OP_READ)] = {
741 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
742 [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
743 },
744 [C(OP_WRITE)] = {
745 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
746 [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
747 },
748 [C(OP_PREFETCH)] = {
749 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
750 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
751 },
752 },
753 [C(ITLB)] = {
754 [C(OP_READ)] = {
755 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
756 [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
757 },
758 [C(OP_WRITE)] = {
759 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
760 [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
761 },
762 [C(OP_PREFETCH)] = {
763 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
764 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
765 },
766 },
767 [C(BPU)] = {
768 [C(OP_READ)] = {
769 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
770 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
771 },
772 [C(OP_WRITE)] = {
773 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
774 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
775 },
776 [C(OP_PREFETCH)] = {
777 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
778 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
779 },
780 },
781 };
782
783 enum armv6mpcore_perf_types {
784 ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
785 ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
786 ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
787 ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
788 ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
789 ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
790 ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
791 ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
792 ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
793 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
794 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
795 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
796 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
797 ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
798 ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
799 ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
800 ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
801 ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
802 ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
803 ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
804 };
805
806 /*
807 * The hardware events that we support. We do support cache operations but
 808  * we have Harvard caches and no way to combine instruction and data
809 * accesses/misses in hardware.
810 */
811 static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
812 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
813 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
814 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
815 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
816 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
817 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
818 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
819 };
820
821 static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
822 [PERF_COUNT_HW_CACHE_OP_MAX]
823 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
824 [C(L1D)] = {
825 [C(OP_READ)] = {
826 [C(RESULT_ACCESS)] =
827 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
828 [C(RESULT_MISS)] =
829 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
830 },
831 [C(OP_WRITE)] = {
832 [C(RESULT_ACCESS)] =
833 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
834 [C(RESULT_MISS)] =
835 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
836 },
837 [C(OP_PREFETCH)] = {
838 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
839 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
840 },
841 },
842 [C(L1I)] = {
843 [C(OP_READ)] = {
844 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
845 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
846 },
847 [C(OP_WRITE)] = {
848 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
849 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
850 },
851 [C(OP_PREFETCH)] = {
852 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
853 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
854 },
855 },
856 [C(LL)] = {
857 [C(OP_READ)] = {
858 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
859 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
860 },
861 [C(OP_WRITE)] = {
862 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
863 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
864 },
865 [C(OP_PREFETCH)] = {
866 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
867 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
868 },
869 },
870 [C(DTLB)] = {
871 /*
872 * The ARM performance counters can count micro DTLB misses,
873 * micro ITLB misses and main TLB misses. There isn't an event
874 * for TLB misses, so use the micro misses here and if users
875 * want the main TLB misses they can use a raw counter.
876 */
877 [C(OP_READ)] = {
878 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
879 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
880 },
881 [C(OP_WRITE)] = {
882 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
883 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
884 },
885 [C(OP_PREFETCH)] = {
886 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
887 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
888 },
889 },
890 [C(ITLB)] = {
891 [C(OP_READ)] = {
892 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
893 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
894 },
895 [C(OP_WRITE)] = {
896 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
897 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
898 },
899 [C(OP_PREFETCH)] = {
900 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
901 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
902 },
903 },
904 [C(BPU)] = {
905 [C(OP_READ)] = {
906 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
907 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
908 },
909 [C(OP_WRITE)] = {
910 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
911 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
912 },
913 [C(OP_PREFETCH)] = {
914 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
915 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
916 },
917 },
918 };
919
920 static inline unsigned long
921 armv6_pmcr_read(void)
922 {
923 u32 val;
924 asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
925 return val;
926 }
927
928 static inline void
929 armv6_pmcr_write(unsigned long val)
930 {
931 asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
932 }
933
934 #define ARMV6_PMCR_ENABLE (1 << 0)
935 #define ARMV6_PMCR_CTR01_RESET (1 << 1)
936 #define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
937 #define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
938 #define ARMV6_PMCR_COUNT0_IEN (1 << 4)
939 #define ARMV6_PMCR_COUNT1_IEN (1 << 5)
940 #define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
941 #define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
942 #define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
943 #define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
944 #define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
945 #define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
946 #define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
947 #define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
948
949 #define ARMV6_PMCR_OVERFLOWED_MASK \
950 (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
951 ARMV6_PMCR_CCOUNT_OVERFLOW)
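/*
 * Illustrative value (editor's note, not part of the original file): a PMCR
 * word with the PMU enabled and counter 0 counting executed instructions
 * (ARMV6_PERFCTR_INSTR_EXEC, 0x7) with its interrupt enabled would be
 *
 *	ARMV6_PMCR_ENABLE | ARMV6_PMCR_COUNT0_IEN |
 *	(ARMV6_PERFCTR_INSTR_EXEC << ARMV6_PMCR_EVT_COUNT0_SHIFT)
 *		== 0x1 | 0x10 | (0x7 << 20) == 0x00700011.
 */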
952
953 static inline int
954 armv6_pmcr_has_overflowed(unsigned long pmcr)
955 {
956 return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK);
957 }
958
959 static inline int
960 armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
961 enum armv6_counters counter)
962 {
963 int ret = 0;
964
965 if (ARMV6_CYCLE_COUNTER == counter)
966 ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
967 else if (ARMV6_COUNTER0 == counter)
968 ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
969 else if (ARMV6_COUNTER1 == counter)
970 ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
971 else
972 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
973
974 return ret;
975 }
976
977 static inline u32
978 armv6pmu_read_counter(int counter)
979 {
980 unsigned long value = 0;
981
982 if (ARMV6_CYCLE_COUNTER == counter)
983 asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
984 else if (ARMV6_COUNTER0 == counter)
985 asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
986 else if (ARMV6_COUNTER1 == counter)
987 asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
988 else
989 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
990
991 return value;
992 }
993
994 static inline void
995 armv6pmu_write_counter(int counter,
996 u32 value)
997 {
998 if (ARMV6_CYCLE_COUNTER == counter)
999 asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
1000 else if (ARMV6_COUNTER0 == counter)
1001 asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
1002 else if (ARMV6_COUNTER1 == counter)
1003 asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
1004 else
1005 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
1006 }
1007
1008 void
1009 armv6pmu_enable_event(struct hw_perf_event *hwc,
1010 int idx)
1011 {
1012 unsigned long val, mask, evt, flags;
1013
1014 if (ARMV6_CYCLE_COUNTER == idx) {
1015 mask = 0;
1016 evt = ARMV6_PMCR_CCOUNT_IEN;
1017 } else if (ARMV6_COUNTER0 == idx) {
1018 mask = ARMV6_PMCR_EVT_COUNT0_MASK;
1019 evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
1020 ARMV6_PMCR_COUNT0_IEN;
1021 } else if (ARMV6_COUNTER1 == idx) {
1022 mask = ARMV6_PMCR_EVT_COUNT1_MASK;
1023 evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
1024 ARMV6_PMCR_COUNT1_IEN;
1025 } else {
1026 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
1027 return;
1028 }
1029
1030 /*
1031 * Mask out the current event and set the counter to count the event
1032 * that we're interested in.
1033 */
1034 spin_lock_irqsave(&pmu_lock, flags);
1035 val = armv6_pmcr_read();
1036 val &= ~mask;
1037 val |= evt;
1038 armv6_pmcr_write(val);
1039 spin_unlock_irqrestore(&pmu_lock, flags);
1040 }
1041
1042 static irqreturn_t
1043 armv6pmu_handle_irq(int irq_num,
1044 void *dev)
1045 {
1046 unsigned long pmcr = armv6_pmcr_read();
1047 struct perf_sample_data data;
1048 struct cpu_hw_events *cpuc;
1049 struct pt_regs *regs;
1050 int idx;
1051
1052 if (!armv6_pmcr_has_overflowed(pmcr))
1053 return IRQ_NONE;
1054
1055 regs = get_irq_regs();
1056
1057 /*
1058 * The interrupts are cleared by writing the overflow flags back to
1059 * the control register. All of the other bits don't have any effect
1060 * if they are rewritten, so write the whole value back.
1061 */
1062 armv6_pmcr_write(pmcr);
1063
1064 perf_sample_data_init(&data, 0);
1065
1066 cpuc = &__get_cpu_var(cpu_hw_events);
1067 for (idx = 0; idx <= armpmu->num_events; ++idx) {
1068 struct perf_event *event = cpuc->events[idx];
1069 struct hw_perf_event *hwc;
1070
1071 if (!test_bit(idx, cpuc->active_mask))
1072 continue;
1073
1074 /*
1075 * We have a single interrupt for all counters. Check that
1076 * each counter has overflowed before we process it.
1077 */
1078 if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
1079 continue;
1080
1081 hwc = &event->hw;
1082 armpmu_event_update(event, hwc, idx);
1083 data.period = event->hw.last_period;
1084 if (!armpmu_event_set_period(event, hwc, idx))
1085 continue;
1086
1087 if (perf_event_overflow(event, 0, &data, regs))
1088 armpmu->disable(hwc, idx);
1089 }
1090
1091 /*
1092 * Handle the pending perf events.
1093 *
1094 * Note: this call *must* be run with interrupts disabled. For
1095 * platforms that can have the PMU interrupts raised as an NMI, this
1096 * will not work.
1097 */
1098 irq_work_run();
1099
1100 return IRQ_HANDLED;
1101 }
1102
1103 static void
1104 armv6pmu_start(void)
1105 {
1106 unsigned long flags, val;
1107
1108 spin_lock_irqsave(&pmu_lock, flags);
1109 val = armv6_pmcr_read();
1110 val |= ARMV6_PMCR_ENABLE;
1111 armv6_pmcr_write(val);
1112 spin_unlock_irqrestore(&pmu_lock, flags);
1113 }
1114
1115 static void
1116 armv6pmu_stop(void)
1117 {
1118 unsigned long flags, val;
1119
1120 spin_lock_irqsave(&pmu_lock, flags);
1121 val = armv6_pmcr_read();
1122 val &= ~ARMV6_PMCR_ENABLE;
1123 armv6_pmcr_write(val);
1124 spin_unlock_irqrestore(&pmu_lock, flags);
1125 }
1126
1127 static int
1128 armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
1129 struct hw_perf_event *event)
1130 {
1131         /* Always place a cycle-count event on the cycle counter. */
1132 if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
1133 if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
1134 return -EAGAIN;
1135
1136 return ARMV6_CYCLE_COUNTER;
1137 } else {
1138 /*
1139                  * For anything other than a cycle counter, try to use
1140 * counter0 and counter1.
1141 */
1142 if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
1143 return ARMV6_COUNTER1;
1144 }
1145
1146 if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
1147 return ARMV6_COUNTER0;
1148 }
1149
1150 /* The counters are all in use. */
1151 return -EAGAIN;
1152 }
1153 }
1154
1155 static void
1156 armv6pmu_disable_event(struct hw_perf_event *hwc,
1157 int idx)
1158 {
1159 unsigned long val, mask, evt, flags;
1160
1161 if (ARMV6_CYCLE_COUNTER == idx) {
1162 mask = ARMV6_PMCR_CCOUNT_IEN;
1163 evt = 0;
1164 } else if (ARMV6_COUNTER0 == idx) {
1165 mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
1166 evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
1167 } else if (ARMV6_COUNTER1 == idx) {
1168 mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
1169 evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
1170 } else {
1171 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
1172 return;
1173 }
1174
1175 /*
1176 * Mask out the current event and set the counter to count the number
1177 * of ETM bus signal assertion cycles. The external reporting should
1178 * be disabled and so this should never increment.
1179 */
1180 spin_lock_irqsave(&pmu_lock, flags);
1181 val = armv6_pmcr_read();
1182 val &= ~mask;
1183 val |= evt;
1184 armv6_pmcr_write(val);
1185 spin_unlock_irqrestore(&pmu_lock, flags);
1186 }
1187
1188 static void
1189 armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
1190 int idx)
1191 {
1192 unsigned long val, mask, flags, evt = 0;
1193
1194 if (ARMV6_CYCLE_COUNTER == idx) {
1195 mask = ARMV6_PMCR_CCOUNT_IEN;
1196 } else if (ARMV6_COUNTER0 == idx) {
1197 mask = ARMV6_PMCR_COUNT0_IEN;
1198 } else if (ARMV6_COUNTER1 == idx) {
1199 mask = ARMV6_PMCR_COUNT1_IEN;
1200 } else {
1201 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
1202 return;
1203 }
1204
1205 /*
1206 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
1207 * simply disable the interrupt reporting.
1208 */
1209 spin_lock_irqsave(&pmu_lock, flags);
1210 val = armv6_pmcr_read();
1211 val &= ~mask;
1212 val |= evt;
1213 armv6_pmcr_write(val);
1214 spin_unlock_irqrestore(&pmu_lock, flags);
1215 }
1216
1217 static const struct arm_pmu armv6pmu = {
1218 .id = ARM_PERF_PMU_ID_V6,
1219 .name = "v6",
1220 .handle_irq = armv6pmu_handle_irq,
1221 .enable = armv6pmu_enable_event,
1222 .disable = armv6pmu_disable_event,
1223 .read_counter = armv6pmu_read_counter,
1224 .write_counter = armv6pmu_write_counter,
1225 .get_event_idx = armv6pmu_get_event_idx,
1226 .start = armv6pmu_start,
1227 .stop = armv6pmu_stop,
1228 .cache_map = &armv6_perf_cache_map,
1229 .event_map = &armv6_perf_map,
1230 .raw_event_mask = 0xFF,
1231 .num_events = 3,
1232 .max_period = (1LLU << 32) - 1,
1233 };
1234
1235 const struct arm_pmu *__init armv6pmu_init(void)
1236 {
1237 return &armv6pmu;
1238 }
1239
1240 /*
1241 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
1242 * that some of the events have different enumerations and that there is no
1243 * *hack* to stop the programmable counters. To stop the counters we simply
1244 * disable the interrupt reporting and update the event. When unthrottling we
1245 * reset the period and enable the interrupt reporting.
1246 */
1247 static const struct arm_pmu armv6mpcore_pmu = {
1248 .id = ARM_PERF_PMU_ID_V6MP,
1249 .name = "v6mpcore",
1250 .handle_irq = armv6pmu_handle_irq,
1251 .enable = armv6pmu_enable_event,
1252 .disable = armv6mpcore_pmu_disable_event,
1253 .read_counter = armv6pmu_read_counter,
1254 .write_counter = armv6pmu_write_counter,
1255 .get_event_idx = armv6pmu_get_event_idx,
1256 .start = armv6pmu_start,
1257 .stop = armv6pmu_stop,
1258 .cache_map = &armv6mpcore_perf_cache_map,
1259 .event_map = &armv6mpcore_perf_map,
1260 .raw_event_mask = 0xFF,
1261 .num_events = 3,
1262 .max_period = (1LLU << 32) - 1,
1263 };
1264
1265 const struct arm_pmu *__init armv6mpcore_pmu_init(void)
1266 {
1267 return &armv6mpcore_pmu;
1268 }
1269
1270 /*
1271 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
1272 *
1273  * Copied from the ARMv6 code, with the low-level code inspired
1274  * by the ARMv7 OProfile code.
1275 *
1276 * Cortex-A8 has up to 4 configurable performance counters and
1277 * a single cycle counter.
1278 * Cortex-A9 has up to 31 configurable performance counters and
1279 * a single cycle counter.
1280 *
1281  * All counters can be enabled/disabled and IRQ-masked individually. The cycle
1282  * counter and the event counters are reset by separate control bits.
1283 */
1284
1285 /* Common ARMv7 event types */
1286 enum armv7_perf_types {
1287 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
1288 ARMV7_PERFCTR_IFETCH_MISS = 0x01,
1289 ARMV7_PERFCTR_ITLB_MISS = 0x02,
1290 ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
1291 ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
1292 ARMV7_PERFCTR_DTLB_REFILL = 0x05,
1293 ARMV7_PERFCTR_DREAD = 0x06,
1294 ARMV7_PERFCTR_DWRITE = 0x07,
1295
1296 ARMV7_PERFCTR_EXC_TAKEN = 0x09,
1297 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
1298 ARMV7_PERFCTR_CID_WRITE = 0x0B,
1299 /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
1300 * It counts:
1301 * - all branch instructions,
1302 * - instructions that explicitly write the PC,
1303 * - exception generating instructions.
1304 */
1305 ARMV7_PERFCTR_PC_WRITE = 0x0C,
1306 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
1307 ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
1308 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
1309 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
1310
1311 ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,
1312
1313 ARMV7_PERFCTR_CPU_CYCLES = 0xFF
1314 };
1315
1316 /* ARMv7 Cortex-A8 specific event types */
1317 enum armv7_a8_perf_types {
1318 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
1319
1320 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
1321
1322 ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
1323 ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
1324 ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
1325 ARMV7_PERFCTR_L2_ACCESS = 0x43,
1326 ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
1327 ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
1328 ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
1329 ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
1330 ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
1331 ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
1332 ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
1333 ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
1334 ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
1335 ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
1336 ARMV7_PERFCTR_L2_NEON = 0x4E,
1337 ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
1338 ARMV7_PERFCTR_L1_INST = 0x50,
1339 ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
1340 ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
1341 ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
1342 ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
1343 ARMV7_PERFCTR_OP_EXECUTED = 0x55,
1344 ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
1345 ARMV7_PERFCTR_CYCLES_INST = 0x57,
1346 ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
1347 ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
1348 ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
1349
1350 ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
1351 ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
1352 ARMV7_PERFCTR_PMU_EVENTS = 0x72,
1353 };
1354
1355 /* ARMv7 Cortex-A9 specific event types */
1356 enum armv7_a9_perf_types {
1357 ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
1358 ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
1359 ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,
1360
1361 ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
1362 ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
1363
1364 ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
1365 ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
1366 ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
1367 ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
1368 ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
1369 ARMV7_PERFCTR_DATA_EVICTION = 0x65,
1370 ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
1371 ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
1372 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
1373
1374 ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
1375
1376 ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
1377 ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
1378 ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
1379 ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
1380 ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
1381
1382 ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
1383 ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
1384 ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
1385 ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
1386 ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
1387 ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
1388 ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
1389
1390 ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
1391 ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
1392
1393 ARMV7_PERFCTR_ISB_INST = 0x90,
1394 ARMV7_PERFCTR_DSB_INST = 0x91,
1395 ARMV7_PERFCTR_DMB_INST = 0x92,
1396 ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
1397
1398 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
1399 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
1400 ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
1401 ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
1402 ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
1403 ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
1404 };
1405
1406 /*
1407 * Cortex-A8 HW events mapping
1408 *
1409 * The hardware events that we support. We do support cache operations but
1410  * we have Harvard caches and no way to combine instruction and data
1411 * accesses/misses in hardware.
1412 */
1413 static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
1414 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
1415 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
1416 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
1417 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
1418 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
1419 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1420 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
1421 };
1422
1423 static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
1424 [PERF_COUNT_HW_CACHE_OP_MAX]
1425 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1426 [C(L1D)] = {
1427 /*
1428 * The performance counters don't differentiate between read
1429 * and write accesses/misses so this isn't strictly correct,
1430 * but it's the best we can do. Writes and reads get
1431 * combined.
1432 */
1433 [C(OP_READ)] = {
1434 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1435 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1436 },
1437 [C(OP_WRITE)] = {
1438 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1439 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1440 },
1441 [C(OP_PREFETCH)] = {
1442 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1443 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1444 },
1445 },
1446 [C(L1I)] = {
1447 [C(OP_READ)] = {
1448 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
1449 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
1450 },
1451 [C(OP_WRITE)] = {
1452 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
1453 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
1454 },
1455 [C(OP_PREFETCH)] = {
1456 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1457 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1458 },
1459 },
1460 [C(LL)] = {
1461 [C(OP_READ)] = {
1462 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
1463 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
1464 },
1465 [C(OP_WRITE)] = {
1466 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
1467 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
1468 },
1469 [C(OP_PREFETCH)] = {
1470 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1471 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1472 },
1473 },
1474 [C(DTLB)] = {
1475 /*
1476 * Only ITLB misses and DTLB refills are supported.
1477                  * If users want anything else, a raw counter
1478                  * must be used.
1479 */
1480 [C(OP_READ)] = {
1481 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1482 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1483 },
1484 [C(OP_WRITE)] = {
1485 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1486 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1487 },
1488 [C(OP_PREFETCH)] = {
1489 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1490 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1491 },
1492 },
1493 [C(ITLB)] = {
1494 [C(OP_READ)] = {
1495 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1496 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1497 },
1498 [C(OP_WRITE)] = {
1499 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1500 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1501 },
1502 [C(OP_PREFETCH)] = {
1503 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1504 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1505 },
1506 },
1507 [C(BPU)] = {
1508 [C(OP_READ)] = {
1509 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1510 [C(RESULT_MISS)]
1511 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1512 },
1513 [C(OP_WRITE)] = {
1514 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1515 [C(RESULT_MISS)]
1516 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1517 },
1518 [C(OP_PREFETCH)] = {
1519 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1520 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1521 },
1522 },
1523 };
1524
1525 /*
1526 * Cortex-A9 HW events mapping
1527 */
1528 static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
1529 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
1530 [PERF_COUNT_HW_INSTRUCTIONS] =
1531 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
1532 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
1533 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
1534 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
1535 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1536 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
1537 };
1538
1539 static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
1540 [PERF_COUNT_HW_CACHE_OP_MAX]
1541 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1542 [C(L1D)] = {
1543 /*
1544 * The performance counters don't differentiate between read
1545 * and write accesses/misses so this isn't strictly correct,
1546 * but it's the best we can do. Writes and reads get
1547 * combined.
1548 */
1549 [C(OP_READ)] = {
1550 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1551 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1552 },
1553 [C(OP_WRITE)] = {
1554 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1555 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1556 },
1557 [C(OP_PREFETCH)] = {
1558 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1559 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1560 },
1561 },
1562 [C(L1I)] = {
1563 [C(OP_READ)] = {
1564 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1565 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
1566 },
1567 [C(OP_WRITE)] = {
1568 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1569 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
1570 },
1571 [C(OP_PREFETCH)] = {
1572 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1573 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1574 },
1575 },
1576 [C(LL)] = {
1577 [C(OP_READ)] = {
1578 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1579 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1580 },
1581 [C(OP_WRITE)] = {
1582 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1583 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1584 },
1585 [C(OP_PREFETCH)] = {
1586 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1587 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1588 },
1589 },
1590 [C(DTLB)] = {
1591 /*
1592 * Only ITLB misses and DTLB refills are supported.
1593 * If users want the DTLB refills misses a raw counter
1594 * must be used.
1595 */
1596 [C(OP_READ)] = {
1597 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1598 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1599 },
1600 [C(OP_WRITE)] = {
1601 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1602 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1603 },
1604 [C(OP_PREFETCH)] = {
1605 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1606 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1607 },
1608 },
1609 [C(ITLB)] = {
1610 [C(OP_READ)] = {
1611 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1612 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1613 },
1614 [C(OP_WRITE)] = {
1615 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1616 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1617 },
1618 [C(OP_PREFETCH)] = {
1619 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1620 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1621 },
1622 },
1623 [C(BPU)] = {
1624 [C(OP_READ)] = {
1625 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1626 [C(RESULT_MISS)]
1627 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1628 },
1629 [C(OP_WRITE)] = {
1630 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1631 [C(RESULT_MISS)]
1632 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1633 },
1634 [C(OP_PREFETCH)] = {
1635 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1636 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1637 },
1638 },
1639 };
1640
1641 /*
1642 * Perf Events counters
1643 */
1644 enum armv7_counters {
1645 ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */
1646 ARMV7_COUNTER0 = 2, /* First event counter */
1647 };
1648
1649 /*
1650 * The cycle counter is ARMV7_CYCLE_COUNTER.
1651 * The first event counter is ARMV7_COUNTER0.
1652 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
1653 */
1654 #define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
1655
1656 /*
1657 * ARMv7 low level PMNC access
1658 */
1659
1660 /*
1661 * Per-CPU PMNC: config reg
1662 */
1663 #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
1664 #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
1665 #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
1666 #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
1667 #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
1668 #define ARMV7_PMNC_DP         (1 << 5)  /* Disable CCNT if non-invasive debug */
1669 #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
1670 #define ARMV7_PMNC_N_MASK 0x1f
1671 #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
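/*
 * Illustrative read (editor's note, not part of the original file): the
 * number of event counters implemented by the core lives in PMNC[15:11],
 * so it could be queried with
 *
 *	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
 *
 * which reads back as 4 on Cortex-A8, for example.
 */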
1672
1673 /*
1674 * Available counters
1675 */
1676 #define ARMV7_CNT0 0 /* First event counter */
1677 #define ARMV7_CCNT 31 /* Cycle counter */
1678
1679 /* Perf Event to low level counters mapping */
1680 #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
1681
1682 /*
1683 * CNTENS: counters enable reg
1684 */
1685 #define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1686 #define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
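/*
 * Worked example (editor's note, not part of the original file): with
 * ARMV7_COUNTER0 == 2 and ARMV7_CNT0 == 0, ARMV7_EVENT_CNT_TO_CNTx is 2,
 * so perf counter index 2 drives hardware counter 0.  For instance,
 * ARMV7_CNTENS_P(3) == (1 << 1), the enable bit for hardware counter 1,
 * while the cycle counter keeps its dedicated bit ARMV7_CNTENS_C == (1 << 31).
 */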
1687
1688 /*
1689 * CNTENC: counters disable reg
1690 */
1691 #define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1692 #define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
1693
1694 /*
1695 * INTENS: counters overflow interrupt enable reg
1696 */
1697 #define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1698 #define ARMV7_INTENS_C (1 << ARMV7_CCNT)
1699
1700 /*
1701 * INTENC: counters overflow interrupt disable reg
1702 */
1703 #define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1704 #define ARMV7_INTENC_C (1 << ARMV7_CCNT)
1705
1706 /*
1707 * EVTSEL: Event selection reg
1708 */
1709 #define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */
1710
1711 /*
1712 * SELECT: Counter selection reg
1713 */
1714 #define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */
1715
1716 /*
1717 * FLAG: counters overflow flag status reg
1718 */
1719 #define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1720 #define ARMV7_FLAG_C (1 << ARMV7_CCNT)
1721 #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
1722 #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
1723
1724 static inline unsigned long armv7_pmnc_read(void)
1725 {
1726 u32 val;
1727 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
1728 return val;
1729 }
1730
1731 static inline void armv7_pmnc_write(unsigned long val)
1732 {
1733 val &= ARMV7_PMNC_MASK;
1734 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
1735 }
1736
1737 static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
1738 {
1739 return pmnc & ARMV7_OVERFLOWED_MASK;
1740 }
1741
1742 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
1743 enum armv7_counters counter)
1744 {
1745 int ret = 0;
1746
1747 if (counter == ARMV7_CYCLE_COUNTER)
1748 ret = pmnc & ARMV7_FLAG_C;
1749 else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
1750 ret = pmnc & ARMV7_FLAG_P(counter);
1751 else
1752 pr_err("CPU%u checking wrong counter %d overflow status\n",
1753 smp_processor_id(), counter);
1754
1755 return ret;
1756 }
1757
1758 static inline int armv7_pmnc_select_counter(unsigned int idx)
1759 {
1760 u32 val;
1761
1762 if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
1763 pr_err("CPU%u selecting wrong PMNC counter"
1764 " %d\n", smp_processor_id(), idx);
1765 return -1;
1766 }
1767
1768 val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
1769 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
1770
1771 return idx;
1772 }
1773
1774 static inline u32 armv7pmu_read_counter(int idx)
1775 {
1776 unsigned long value = 0;
1777
1778 if (idx == ARMV7_CYCLE_COUNTER)
1779 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
1780 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
1781 if (armv7_pmnc_select_counter(idx) == idx)
1782 asm volatile("mrc p15, 0, %0, c9, c13, 2"
1783 : "=r" (value));
1784 } else
1785 pr_err("CPU%u reading wrong counter %d\n",
1786 smp_processor_id(), idx);
1787
1788 return value;
1789 }
1790
1791 static inline void armv7pmu_write_counter(int idx, u32 value)
1792 {
1793 if (idx == ARMV7_CYCLE_COUNTER)
1794 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
1795 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
1796 if (armv7_pmnc_select_counter(idx) == idx)
1797 asm volatile("mcr p15, 0, %0, c9, c13, 2"
1798 : : "r" (value));
1799 } else
1800 pr_err("CPU%u writing wrong counter %d\n",
1801 smp_processor_id(), idx);
1802 }
1803
1804 static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
1805 {
1806 if (armv7_pmnc_select_counter(idx) == idx) {
1807 val &= ARMV7_EVTSEL_MASK;
1808 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
1809 }
1810 }
1811
1812 static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
1813 {
1814 u32 val;
1815
1816 if ((idx != ARMV7_CYCLE_COUNTER) &&
1817 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1818 pr_err("CPU%u enabling wrong PMNC counter"
1819 " %d\n", smp_processor_id(), idx);
1820 return -1;
1821 }
1822
1823 if (idx == ARMV7_CYCLE_COUNTER)
1824 val = ARMV7_CNTENS_C;
1825 else
1826 val = ARMV7_CNTENS_P(idx);
1827
1828 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
1829
1830 return idx;
1831 }
1832
1833 static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
1834 {
1835 u32 val;
1836
1837
1838 if ((idx != ARMV7_CYCLE_COUNTER) &&
1839 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1840 pr_err("CPU%u disabling wrong PMNC counter"
1841 " %d\n", smp_processor_id(), idx);
1842 return -1;
1843 }
1844
1845 if (idx == ARMV7_CYCLE_COUNTER)
1846 val = ARMV7_CNTENC_C;
1847 else
1848 val = ARMV7_CNTENC_P(idx);
1849
1850 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
1851
1852 return idx;
1853 }
1854
1855 static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
1856 {
1857 u32 val;
1858
1859 if ((idx != ARMV7_CYCLE_COUNTER) &&
1860 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1861 pr_err("CPU%u enabling wrong PMNC counter"
1862 " interrupt enable %d\n", smp_processor_id(), idx);
1863 return -1;
1864 }
1865
1866 if (idx == ARMV7_CYCLE_COUNTER)
1867 val = ARMV7_INTENS_C;
1868 else
1869 val = ARMV7_INTENS_P(idx);
1870
1871 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
1872
1873 return idx;
1874 }
1875
1876 static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
1877 {
1878 u32 val;
1879
1880 if ((idx != ARMV7_CYCLE_COUNTER) &&
1881 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1882 pr_err("CPU%u disabling wrong PMNC counter"
1883 " interrupt enable %d\n", smp_processor_id(), idx);
1884 return -1;
1885 }
1886
1887 if (idx == ARMV7_CYCLE_COUNTER)
1888 val = ARMV7_INTENC_C;
1889 else
1890 val = ARMV7_INTENC_P(idx);
1891
1892 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
1893
1894 return idx;
1895 }
1896
1897 static inline u32 armv7_pmnc_getreset_flags(void)
1898 {
1899 u32 val;
1900
1901 /* Read */
1902 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1903
1904 /* Write to clear flags */
1905 val &= ARMV7_FLAG_MASK;
1906 asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
1907
1908 return val;
1909 }
1910
1911 #ifdef DEBUG
1912 static void armv7_pmnc_dump_regs(void)
1913 {
1914 u32 val;
1915 unsigned int cnt;
1916
1917 printk(KERN_INFO "PMNC registers dump:\n");
1918
1919 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
1920 printk(KERN_INFO "PMNC =0x%08x\n", val);
1921
1922 asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
1923 printk(KERN_INFO "CNTENS=0x%08x\n", val);
1924
1925 asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
1926 printk(KERN_INFO "INTENS=0x%08x\n", val);
1927
1928 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1929 printk(KERN_INFO "FLAGS =0x%08x\n", val);
1930
1931 asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
1932 printk(KERN_INFO "SELECT=0x%08x\n", val);
1933
1934 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
1935 printk(KERN_INFO "CCNT =0x%08x\n", val);
1936
1937 for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
1938 armv7_pmnc_select_counter(cnt);
1939 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
1940 printk(KERN_INFO "CNT[%d] count =0x%08x\n",
1941 cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
1942 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
1943 printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
1944 cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
1945 }
1946 }
1947 #endif
1948
1949 static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
1950 {
1951 unsigned long flags;
1952
1953 /*
1954 * Enable counter and interrupt, and set the counter to count
1955 * the event that we're interested in.
1956 */
1957 spin_lock_irqsave(&pmu_lock, flags);
1958
1959 /*
1960 * Disable counter
1961 */
1962 armv7_pmnc_disable_counter(idx);
1963
1964 /*
1965 * Set event (if destined for PMNx counters)
1966 * We don't need to set the event if it's a cycle count
1967 */
1968 if (idx != ARMV7_CYCLE_COUNTER)
1969 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1970
1971 /*
1972 * Enable interrupt for this counter
1973 */
1974 armv7_pmnc_enable_intens(idx);
1975
1976 /*
1977 * Enable counter
1978 */
1979 armv7_pmnc_enable_counter(idx);
1980
1981 spin_unlock_irqrestore(&pmu_lock, flags);
1982 }
1983
1984 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1985 {
1986 unsigned long flags;
1987
1988 /*
1989 * Disable counter and interrupt
1990 */
1991 spin_lock_irqsave(&pmu_lock, flags);
1992
1993 /*
1994 * Disable counter
1995 */
1996 armv7_pmnc_disable_counter(idx);
1997
1998 /*
1999 * Disable interrupt for this counter
2000 */
2001 armv7_pmnc_disable_intens(idx);
2002
2003 spin_unlock_irqrestore(&pmu_lock, flags);
2004 }
2005
2006 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
2007 {
2008 unsigned long pmnc;
2009 struct perf_sample_data data;
2010 struct cpu_hw_events *cpuc;
2011 struct pt_regs *regs;
2012 int idx;
2013
2014 /*
2015 * Get and reset the IRQ flags
2016 */
2017 pmnc = armv7_pmnc_getreset_flags();
2018
2019 /*
2020 * Did an overflow occur?
2021 */
2022 if (!armv7_pmnc_has_overflowed(pmnc))
2023 return IRQ_NONE;
2024
2025 /*
2026 * Handle the counter(s) overflow(s)
2027 */
2028 regs = get_irq_regs();
2029
2030 perf_sample_data_init(&data, 0);
2031
2032 cpuc = &__get_cpu_var(cpu_hw_events);
2033 for (idx = 0; idx <= armpmu->num_events; ++idx) {
2034 struct perf_event *event = cpuc->events[idx];
2035 struct hw_perf_event *hwc;
2036
2037 if (!test_bit(idx, cpuc->active_mask))
2038 continue;
2039
2040 /*
2041 * We have a single interrupt for all counters. Check that
2042 * each counter has overflowed before we process it.
2043 */
2044 if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
2045 continue;
2046
2047 hwc = &event->hw;
2048 armpmu_event_update(event, hwc, idx);
2049 data.period = event->hw.last_period;
2050 if (!armpmu_event_set_period(event, hwc, idx))
2051 continue;
2052
2053 if (perf_event_overflow(event, 0, &data, regs))
2054 armpmu->disable(hwc, idx);
2055 }
2056
2057 /*
2058 * Handle the pending perf events.
2059 *
2060 * Note: this call *must* be run with interrupts disabled. For
2061 * platforms that can have the PMU interrupts raised as an NMI, this
2062 * will not work.
2063 */
2064 irq_work_run();
2065
2066 return IRQ_HANDLED;
2067 }
2068
2069 static void armv7pmu_start(void)
2070 {
2071 unsigned long flags;
2072
2073 spin_lock_irqsave(&pmu_lock, flags);
2074 /* Enable all counters */
2075 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
2076 spin_unlock_irqrestore(&pmu_lock, flags);
2077 }
2078
2079 static void armv7pmu_stop(void)
2080 {
2081 unsigned long flags;
2082
2083 spin_lock_irqsave(&pmu_lock, flags);
2084 /* Disable all counters */
2085 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
2086 spin_unlock_irqrestore(&pmu_lock, flags);
2087 }
2088
2089 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
2090 struct hw_perf_event *event)
2091 {
2092 int idx;
2093
2094 /* Always place a cycle count event into the cycle counter. */
2095 if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
2096 if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
2097 return -EAGAIN;
2098
2099 return ARMV7_CYCLE_COUNTER;
2100 } else {
2101 /*
2102 * For anything other than a cycle count event, try to use
2103 * the event counters.
2104 */
2105 for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
2106 if (!test_and_set_bit(idx, cpuc->used_mask))
2107 return idx;
2108 }
2109
2110 /* The counters are all in use. */
2111 return -EAGAIN;
2112 }
2113 }
2114
2115 static struct arm_pmu armv7pmu = {
2116 .handle_irq = armv7pmu_handle_irq,
2117 .enable = armv7pmu_enable_event,
2118 .disable = armv7pmu_disable_event,
2119 .read_counter = armv7pmu_read_counter,
2120 .write_counter = armv7pmu_write_counter,
2121 .get_event_idx = armv7pmu_get_event_idx,
2122 .start = armv7pmu_start,
2123 .stop = armv7pmu_stop,
2124 .raw_event_mask = 0xFF,
2125 .max_period = (1LLU << 32) - 1,
2126 };
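/*
 * The id, name, cache_map, event_map and num_events fields are left
 * unset here; they are filled in by the Cortex-A8/A9 init functions
 * below, with num_events probed at runtime from the PMNC N field by
 * armv7_reset_read_pmnc().
 */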
2127
2128 static u32 __init armv7_reset_read_pmnc(void)
2129 {
2130 u32 nb_cnt;
2131
2132 /* Initialize & Reset PMNC: C and P bits */
2133 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
2134
2135 /* Read the number of CNTx counters supported from PMNC */
2136 nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
2137
2138 /* Add the CPU cycles counter and return */
2139 return nb_cnt + 1;
2140 }
2141
2142 const struct arm_pmu *__init armv7_a8_pmu_init(void)
2143 {
2144 armv7pmu.id = ARM_PERF_PMU_ID_CA8;
2145 armv7pmu.name = "ARMv7 Cortex-A8";
2146 armv7pmu.cache_map = &armv7_a8_perf_cache_map;
2147 armv7pmu.event_map = &armv7_a8_perf_map;
2148 armv7pmu.num_events = armv7_reset_read_pmnc();
2149 return &armv7pmu;
2150 }
2151
2152 const struct arm_pmu *__init armv7_a9_pmu_init(void)
2153 {
2154 armv7pmu.id = ARM_PERF_PMU_ID_CA9;
2155 armv7pmu.name = "ARMv7 Cortex-A9";
2156 armv7pmu.cache_map = &armv7_a9_perf_cache_map;
2157 armv7pmu.event_map = &armv7_a9_perf_map;
2158 armv7pmu.num_events = armv7_reset_read_pmnc();
2159 return &armv7pmu;
2160 }
2161
2162
2163 /*
2164 * ARMv5 [xscale] Performance counter handling code.
2165 *
2166 * Based on xscale OProfile code.
2167 *
2168 * There are two variants of the xscale PMU that we support:
2169 * - xscale1pmu: 2 event counters and a cycle counter
2170 * - xscale2pmu: 4 event counters and a cycle counter
2171 * The two variants share event definitions, but have different
2172 * PMU structures.
2173 */
2174
2175 enum xscale_perf_types {
2176 XSCALE_PERFCTR_ICACHE_MISS = 0x00,
2177 XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
2178 XSCALE_PERFCTR_DATA_STALL = 0x02,
2179 XSCALE_PERFCTR_ITLB_MISS = 0x03,
2180 XSCALE_PERFCTR_DTLB_MISS = 0x04,
2181 XSCALE_PERFCTR_BRANCH = 0x05,
2182 XSCALE_PERFCTR_BRANCH_MISS = 0x06,
2183 XSCALE_PERFCTR_INSTRUCTION = 0x07,
2184 XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
2185 XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
2186 XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
2187 XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
2188 XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
2189 XSCALE_PERFCTR_PC_CHANGED = 0x0D,
2190 XSCALE_PERFCTR_BCU_REQUEST = 0x10,
2191 XSCALE_PERFCTR_BCU_FULL = 0x11,
2192 XSCALE_PERFCTR_BCU_DRAIN = 0x12,
2193 XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
2194 XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
2195 XSCALE_PERFCTR_RMW = 0x16,
2196 /* XSCALE_PERFCTR_CCNT is not hardware defined */
2197 XSCALE_PERFCTR_CCNT = 0xFE,
2198 XSCALE_PERFCTR_UNUSED = 0xFF,
2199 };
2200
2201 enum xscale_counters {
2202 XSCALE_CYCLE_COUNTER = 1,
2203 XSCALE_COUNTER0,
2204 XSCALE_COUNTER1,
2205 XSCALE_COUNTER2,
2206 XSCALE_COUNTER3,
2207 };
2208
2209 static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
2210 [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
2211 [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
2212 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
2213 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
2214 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
2215 [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
2216 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
2217 };
2218
2219 static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
2220 [PERF_COUNT_HW_CACHE_OP_MAX]
2221 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2222 [C(L1D)] = {
2223 [C(OP_READ)] = {
2224 [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
2225 [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
2226 },
2227 [C(OP_WRITE)] = {
2228 [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
2229 [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
2230 },
2231 [C(OP_PREFETCH)] = {
2232 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2233 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2234 },
2235 },
2236 [C(L1I)] = {
2237 [C(OP_READ)] = {
2238 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2239 [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
2240 },
2241 [C(OP_WRITE)] = {
2242 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2243 [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
2244 },
2245 [C(OP_PREFETCH)] = {
2246 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2247 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2248 },
2249 },
2250 [C(LL)] = {
2251 [C(OP_READ)] = {
2252 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2253 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2254 },
2255 [C(OP_WRITE)] = {
2256 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2257 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2258 },
2259 [C(OP_PREFETCH)] = {
2260 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2261 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2262 },
2263 },
2264 [C(DTLB)] = {
2265 [C(OP_READ)] = {
2266 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2267 [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
2268 },
2269 [C(OP_WRITE)] = {
2270 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2271 [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
2272 },
2273 [C(OP_PREFETCH)] = {
2274 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2275 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2276 },
2277 },
2278 [C(ITLB)] = {
2279 [C(OP_READ)] = {
2280 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2281 [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
2282 },
2283 [C(OP_WRITE)] = {
2284 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2285 [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
2286 },
2287 [C(OP_PREFETCH)] = {
2288 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2289 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2290 },
2291 },
2292 [C(BPU)] = {
2293 [C(OP_READ)] = {
2294 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2295 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2296 },
2297 [C(OP_WRITE)] = {
2298 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2299 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2300 },
2301 [C(OP_PREFETCH)] = {
2302 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
2303 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
2304 },
2305 },
2306 };
2307
2308 #define XSCALE_PMU_ENABLE 0x001
2309 #define XSCALE_PMN_RESET 0x002
2310 #define XSCALE_CCNT_RESET 0x004
2311 #define XSCALE_PMU_RESET (XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
2312 #define XSCALE_PMU_CNT64 0x008
2313
2314 #define XSCALE1_OVERFLOWED_MASK 0x700
2315 #define XSCALE1_CCOUNT_OVERFLOW 0x400
2316 #define XSCALE1_COUNT0_OVERFLOW 0x100
2317 #define XSCALE1_COUNT1_OVERFLOW 0x200
2318 #define XSCALE1_CCOUNT_INT_EN 0x040
2319 #define XSCALE1_COUNT0_INT_EN 0x010
2320 #define XSCALE1_COUNT1_INT_EN 0x020
2321 #define XSCALE1_COUNT0_EVT_SHFT 12
2322 #define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
2323 #define XSCALE1_COUNT1_EVT_SHFT 20
2324 #define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
2325
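/*
 * As the masks above encode, the XScale1 PMU packs everything into the
 * single cp14 PMNC register: enable/reset controls in bits [3:0],
 * interrupt enables in bits [6:4], overflow flags in bits [10:8] and
 * the two event selectors in bits [19:12] and [27:20].
 */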
2326 static inline u32
2327 xscale1pmu_read_pmnc(void)
2328 {
2329 u32 val;
2330 asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
2331 return val;
2332 }
2333
2334 static inline void
2335 xscale1pmu_write_pmnc(u32 val)
2336 {
2337 /* the upper 4 bits and bits 7 and 11 are write-as-0 */
2338 val &= 0xffff77f;
2339 asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
2340 }
2341
2342 static inline int
2343 xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
2344 enum xscale_counters counter)
2345 {
2346 int ret = 0;
2347
2348 switch (counter) {
2349 case XSCALE_CYCLE_COUNTER:
2350 ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
2351 break;
2352 case XSCALE_COUNTER0:
2353 ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
2354 break;
2355 case XSCALE_COUNTER1:
2356 ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
2357 break;
2358 default:
2359 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
2360 }
2361
2362 return ret;
2363 }
2364
2365 static irqreturn_t
2366 xscale1pmu_handle_irq(int irq_num, void *dev)
2367 {
2368 unsigned long pmnc;
2369 struct perf_sample_data data;
2370 struct cpu_hw_events *cpuc;
2371 struct pt_regs *regs;
2372 int idx;
2373
2374 /*
2375 * NOTE: the A stepping has an erratum whereby, if an overflow bit
2376 * is already set when another overflow occurs, the previous
2377 * overflow bit is cleared. There is no workaround; this is
2378 * fixed in the B stepping and later.
2379 */
2380 pmnc = xscale1pmu_read_pmnc();
2381
2382 /*
2383 * Write the value back to clear the overflow flags. Overflow
2384 * flags remain in pmnc for use below. We also disable the PMU
2385 * while we process the interrupt.
2386 */
2387 xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
2388
2389 if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
2390 return IRQ_NONE;
2391
2392 regs = get_irq_regs();
2393
2394 perf_sample_data_init(&data, 0);
2395
2396 cpuc = &__get_cpu_var(cpu_hw_events);
2397 for (idx = 0; idx <= armpmu->num_events; ++idx) {
2398 struct perf_event *event = cpuc->events[idx];
2399 struct hw_perf_event *hwc;
2400
2401 if (!test_bit(idx, cpuc->active_mask))
2402 continue;
2403
2404 if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
2405 continue;
2406
2407 hwc = &event->hw;
2408 armpmu_event_update(event, hwc, idx);
2409 data.period = event->hw.last_period;
2410 if (!armpmu_event_set_period(event, hwc, idx))
2411 continue;
2412
2413 if (perf_event_overflow(event, 0, &data, regs))
2414 armpmu->disable(hwc, idx);
2415 }
2416
2417 irq_work_run();
2418
2419 /*
2420 * Re-enable the PMU.
2421 */
2422 pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
2423 xscale1pmu_write_pmnc(pmnc);
2424
2425 return IRQ_HANDLED;
2426 }
2427
2428 static void
2429 xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
2430 {
2431 unsigned long val, mask, evt, flags;
2432
2433 switch (idx) {
2434 case XSCALE_CYCLE_COUNTER:
2435 mask = 0;
2436 evt = XSCALE1_CCOUNT_INT_EN;
2437 break;
2438 case XSCALE_COUNTER0:
2439 mask = XSCALE1_COUNT0_EVT_MASK;
2440 evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
2441 XSCALE1_COUNT0_INT_EN;
2442 break;
2443 case XSCALE_COUNTER1:
2444 mask = XSCALE1_COUNT1_EVT_MASK;
2445 evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
2446 XSCALE1_COUNT1_INT_EN;
2447 break;
2448 default:
2449 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
2450 return;
2451 }
2452
2453 spin_lock_irqsave(&pmu_lock, flags);
2454 val = xscale1pmu_read_pmnc();
2455 val &= ~mask;
2456 val |= evt;
2457 xscale1pmu_write_pmnc(val);
2458 spin_unlock_irqrestore(&pmu_lock, flags);
2459 }
2460
2461 static void
2462 xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
2463 {
2464 unsigned long val, mask, evt, flags;
2465
2466 switch (idx) {
2467 case XSCALE_CYCLE_COUNTER:
2468 mask = XSCALE1_CCOUNT_INT_EN;
2469 evt = 0;
2470 break;
2471 case XSCALE_COUNTER0:
2472 mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
2473 evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
2474 break;
2475 case XSCALE_COUNTER1:
2476 mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
2477 evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
2478 break;
2479 default:
2480 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
2481 return;
2482 }
2483
2484 spin_lock_irqsave(&pmu_lock, flags);
2485 val = xscale1pmu_read_pmnc();
2486 val &= ~mask;
2487 val |= evt;
2488 xscale1pmu_write_pmnc(val);
2489 spin_unlock_irqrestore(&pmu_lock, flags);
2490 }
2491
2492 static int
2493 xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
2494 struct hw_perf_event *event)
2495 {
2496 if (XSCALE_PERFCTR_CCNT == event->config_base) {
2497 if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
2498 return -EAGAIN;
2499
2500 return XSCALE_CYCLE_COUNTER;
2501 } else {
2502 if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) {
2503 return XSCALE_COUNTER1;
2504 }
2505
2506 if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) {
2507 return XSCALE_COUNTER0;
2508 }
2509
2510 return -EAGAIN;
2511 }
2512 }
2513
2514 static void
2515 xscale1pmu_start(void)
2516 {
2517 unsigned long flags, val;
2518
2519 spin_lock_irqsave(&pmu_lock, flags);
2520 val = xscale1pmu_read_pmnc();
2521 val |= XSCALE_PMU_ENABLE;
2522 xscale1pmu_write_pmnc(val);
2523 spin_unlock_irqrestore(&pmu_lock, flags);
2524 }
2525
2526 static void
2527 xscale1pmu_stop(void)
2528 {
2529 unsigned long flags, val;
2530
2531 spin_lock_irqsave(&pmu_lock, flags);
2532 val = xscale1pmu_read_pmnc();
2533 val &= ~XSCALE_PMU_ENABLE;
2534 xscale1pmu_write_pmnc(val);
2535 spin_unlock_irqrestore(&pmu_lock, flags);
2536 }
2537
2538 static inline u32
2539 xscale1pmu_read_counter(int counter)
2540 {
2541 u32 val = 0;
2542
2543 switch (counter) {
2544 case XSCALE_CYCLE_COUNTER:
2545 asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
2546 break;
2547 case XSCALE_COUNTER0:
2548 asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
2549 break;
2550 case XSCALE_COUNTER1:
2551 asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
2552 break;
2553 }
2554
2555 return val;
2556 }
2557
2558 static inline void
2559 xscale1pmu_write_counter(int counter, u32 val)
2560 {
2561 switch (counter) {
2562 case XSCALE_CYCLE_COUNTER:
2563 asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
2564 break;
2565 case XSCALE_COUNTER0:
2566 asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
2567 break;
2568 case XSCALE_COUNTER1:
2569 asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
2570 break;
2571 }
2572 }
2573
2574 static const struct arm_pmu xscale1pmu = {
2575 .id = ARM_PERF_PMU_ID_XSCALE1,
2576 .name = "xscale1",
2577 .handle_irq = xscale1pmu_handle_irq,
2578 .enable = xscale1pmu_enable_event,
2579 .disable = xscale1pmu_disable_event,
2580 .read_counter = xscale1pmu_read_counter,
2581 .write_counter = xscale1pmu_write_counter,
2582 .get_event_idx = xscale1pmu_get_event_idx,
2583 .start = xscale1pmu_start,
2584 .stop = xscale1pmu_stop,
2585 .cache_map = &xscale_perf_cache_map,
2586 .event_map = &xscale_perf_map,
2587 .raw_event_mask = 0xFF,
2588 .num_events = 3,
2589 .max_period = (1LLU << 32) - 1,
2590 };
2591
2592 const struct arm_pmu *__init xscale1pmu_init(void)
2593 {
2594 return &xscale1pmu;
2595 }
2596
2597 #define XSCALE2_OVERFLOWED_MASK 0x01f
2598 #define XSCALE2_CCOUNT_OVERFLOW 0x001
2599 #define XSCALE2_COUNT0_OVERFLOW 0x002
2600 #define XSCALE2_COUNT1_OVERFLOW 0x004
2601 #define XSCALE2_COUNT2_OVERFLOW 0x008
2602 #define XSCALE2_COUNT3_OVERFLOW 0x010
2603 #define XSCALE2_CCOUNT_INT_EN 0x001
2604 #define XSCALE2_COUNT0_INT_EN 0x002
2605 #define XSCALE2_COUNT1_INT_EN 0x004
2606 #define XSCALE2_COUNT2_INT_EN 0x008
2607 #define XSCALE2_COUNT3_INT_EN 0x010
2608 #define XSCALE2_COUNT0_EVT_SHFT 0
2609 #define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
2610 #define XSCALE2_COUNT1_EVT_SHFT 8
2611 #define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
2612 #define XSCALE2_COUNT2_EVT_SHFT 16
2613 #define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
2614 #define XSCALE2_COUNT3_EVT_SHFT 24
2615 #define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)
2616
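/*
 * Unlike XScale1, the XScale2 PMU splits its state across separate cp14
 * registers: PMNC carries the global enable and reset controls, while
 * the overflow flags, interrupt enables and event selectors each have
 * their own register, accessed by the helpers below.
 */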
2617 static inline u32
2618 xscale2pmu_read_pmnc(void)
2619 {
2620 u32 val;
2621 asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
2622 /* bits 1-2 and 4-23 are read-unpredictable */
2623 return val & 0xff000009;
2624 }
2625
2626 static inline void
2627 xscale2pmu_write_pmnc(u32 val)
2628 {
2629 /* bits 4-23 are write-as-0, 24-31 are write ignored */
2630 val &= 0xf;
2631 asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
2632 }
2633
2634 static inline u32
2635 xscale2pmu_read_overflow_flags(void)
2636 {
2637 u32 val;
2638 asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
2639 return val;
2640 }
2641
2642 static inline void
2643 xscale2pmu_write_overflow_flags(u32 val)
2644 {
2645 asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
2646 }
2647
2648 static inline u32
2649 xscale2pmu_read_event_select(void)
2650 {
2651 u32 val;
2652 asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
2653 return val;
2654 }
2655
2656 static inline void
2657 xscale2pmu_write_event_select(u32 val)
2658 {
2659 asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
2660 }
2661
2662 static inline u32
2663 xscale2pmu_read_int_enable(void)
2664 {
2665 u32 val;
2666 asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
2667 return val;
2668 }
2669
2670 static void
2671 xscale2pmu_write_int_enable(u32 val)
2672 {
2673 asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
2674 }
2675
2676 static inline int
2677 xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
2678 enum xscale_counters counter)
2679 {
2680 int ret = 0;
2681
2682 switch (counter) {
2683 case XSCALE_CYCLE_COUNTER:
2684 ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
2685 break;
2686 case XSCALE_COUNTER0:
2687 ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
2688 break;
2689 case XSCALE_COUNTER1:
2690 ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
2691 break;
2692 case XSCALE_COUNTER2:
2693 ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
2694 break;
2695 case XSCALE_COUNTER3:
2696 ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
2697 break;
2698 default:
2699 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
2700 }
2701
2702 return ret;
2703 }
2704
2705 static irqreturn_t
2706 xscale2pmu_handle_irq(int irq_num, void *dev)
2707 {
2708 unsigned long pmnc, of_flags;
2709 struct perf_sample_data data;
2710 struct cpu_hw_events *cpuc;
2711 struct pt_regs *regs;
2712 int idx;
2713
2714 /* Disable the PMU. */
2715 pmnc = xscale2pmu_read_pmnc();
2716 xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
2717
2718 /* Check the overflow flag register. */
2719 of_flags = xscale2pmu_read_overflow_flags();
2720 if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
2721 return IRQ_NONE;
2722
2723 /* Clear the overflow bits. */
2724 xscale2pmu_write_overflow_flags(of_flags);
2725
2726 regs = get_irq_regs();
2727
2728 perf_sample_data_init(&data, 0);
2729
2730 cpuc = &__get_cpu_var(cpu_hw_events);
2731 for (idx = 0; idx <= armpmu->num_events; ++idx) {
2732 struct perf_event *event = cpuc->events[idx];
2733 struct hw_perf_event *hwc;
2734
2735 if (!test_bit(idx, cpuc->active_mask))
2736 continue;
2737
2738 if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
2739 continue;
2740
2741 hwc = &event->hw;
2742 armpmu_event_update(event, hwc, idx);
2743 data.period = event->hw.last_period;
2744 if (!armpmu_event_set_period(event, hwc, idx))
2745 continue;
2746
2747 if (perf_event_overflow(event, 0, &data, regs))
2748 armpmu->disable(hwc, idx);
2749 }
2750
2751 irq_work_run();
2752
2753 /*
2754 * Re-enable the PMU.
2755 */
2756 pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
2757 xscale2pmu_write_pmnc(pmnc);
2758
2759 return IRQ_HANDLED;
2760 }
2761
2762 static void
2763 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
2764 {
2765 unsigned long flags, ien, evtsel;
2766
2767 ien = xscale2pmu_read_int_enable();
2768 evtsel = xscale2pmu_read_event_select();
2769
2770 switch (idx) {
2771 case XSCALE_CYCLE_COUNTER:
2772 ien |= XSCALE2_CCOUNT_INT_EN;
2773 break;
2774 case XSCALE_COUNTER0:
2775 ien |= XSCALE2_COUNT0_INT_EN;
2776 evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
2777 evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
2778 break;
2779 case XSCALE_COUNTER1:
2780 ien |= XSCALE2_COUNT1_INT_EN;
2781 evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
2782 evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
2783 break;
2784 case XSCALE_COUNTER2:
2785 ien |= XSCALE2_COUNT2_INT_EN;
2786 evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
2787 evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
2788 break;
2789 case XSCALE_COUNTER3:
2790 ien |= XSCALE2_COUNT3_INT_EN;
2791 evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
2792 evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
2793 break;
2794 default:
2795 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
2796 return;
2797 }
2798
2799 spin_lock_irqsave(&pmu_lock, flags);
2800 xscale2pmu_write_event_select(evtsel);
2801 xscale2pmu_write_int_enable(ien);
2802 spin_unlock_irqrestore(&pmu_lock, flags);
2803 }
2804
2805 static void
2806 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
2807 {
2808 unsigned long flags, ien, evtsel;
2809
2810 ien = xscale2pmu_read_int_enable();
2811 evtsel = xscale2pmu_read_event_select();
2812
2813 switch (idx) {
2814 case XSCALE_CYCLE_COUNTER:
2815 ien &= ~XSCALE2_CCOUNT_INT_EN;
2816 break;
2817 case XSCALE_COUNTER0:
2818 ien &= ~XSCALE2_COUNT0_INT_EN;
2819 evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
2820 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
2821 break;
2822 case XSCALE_COUNTER1:
2823 ien &= ~XSCALE2_COUNT1_INT_EN;
2824 evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
2825 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
2826 break;
2827 case XSCALE_COUNTER2:
2828 ien &= ~XSCALE2_COUNT2_INT_EN;
2829 evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
2830 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
2831 break;
2832 case XSCALE_COUNTER3:
2833 ien &= ~XSCALE2_COUNT3_INT_EN;
2834 evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
2835 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
2836 break;
2837 default:
2838 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
2839 return;
2840 }
2841
2842 spin_lock_irqsave(&pmu_lock, flags);
2843 xscale2pmu_write_event_select(evtsel);
2844 xscale2pmu_write_int_enable(ien);
2845 spin_unlock_irqrestore(&pmu_lock, flags);
2846 }
2847
2848 static int
2849 xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
2850 struct hw_perf_event *event)
2851 {
2852 int idx = xscale1pmu_get_event_idx(cpuc, event);
2853 if (idx >= 0)
2854 goto out;
2855
2856 if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
2857 idx = XSCALE_COUNTER3;
2858 else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
2859 idx = XSCALE_COUNTER2;
2860 out:
2861 return idx;
2862 }
2863
2864 static void
2865 xscale2pmu_start(void)
2866 {
2867 unsigned long flags, val;
2868
2869 spin_lock_irqsave(&pmu_lock, flags);
2870 val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
2871 val |= XSCALE_PMU_ENABLE;
2872 xscale2pmu_write_pmnc(val);
2873 spin_unlock_irqrestore(&pmu_lock, flags);
2874 }
2875
2876 static void
2877 xscale2pmu_stop(void)
2878 {
2879 unsigned long flags, val;
2880
2881 spin_lock_irqsave(&pmu_lock, flags);
2882 val = xscale2pmu_read_pmnc();
2883 val &= ~XSCALE_PMU_ENABLE;
2884 xscale2pmu_write_pmnc(val);
2885 spin_unlock_irqrestore(&pmu_lock, flags);
2886 }
2887
2888 static inline u32
2889 xscale2pmu_read_counter(int counter)
2890 {
2891 u32 val = 0;
2892
2893 switch (counter) {
2894 case XSCALE_CYCLE_COUNTER:
2895 asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
2896 break;
2897 case XSCALE_COUNTER0:
2898 asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
2899 break;
2900 case XSCALE_COUNTER1:
2901 asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
2902 break;
2903 case XSCALE_COUNTER2:
2904 asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
2905 break;
2906 case XSCALE_COUNTER3:
2907 asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
2908 break;
2909 }
2910
2911 return val;
2912 }
2913
2914 static inline void
2915 xscale2pmu_write_counter(int counter, u32 val)
2916 {
2917 switch (counter) {
2918 case XSCALE_CYCLE_COUNTER:
2919 asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
2920 break;
2921 case XSCALE_COUNTER0:
2922 asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
2923 break;
2924 case XSCALE_COUNTER1:
2925 asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
2926 break;
2927 case XSCALE_COUNTER2:
2928 asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
2929 break;
2930 case XSCALE_COUNTER3:
2931 asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
2932 break;
2933 }
2934 }
2935
2936 static const struct arm_pmu xscale2pmu = {
2937 .id = ARM_PERF_PMU_ID_XSCALE2,
2938 .name = "xscale2",
2939 .handle_irq = xscale2pmu_handle_irq,
2940 .enable = xscale2pmu_enable_event,
2941 .disable = xscale2pmu_disable_event,
2942 .read_counter = xscale2pmu_read_counter,
2943 .write_counter = xscale2pmu_write_counter,
2944 .get_event_idx = xscale2pmu_get_event_idx,
2945 .start = xscale2pmu_start,
2946 .stop = xscale2pmu_stop,
2947 .cache_map = &xscale_perf_cache_map,
2948 .event_map = &xscale_perf_map,
2949 .raw_event_mask = 0xFF,
2950 .num_events = 5,
2951 .max_period = (1LLU << 32) - 1,
2952 };
2953
2954 const struct arm_pmu *__init xscale2pmu_init(void)
2955 {
2956 return &xscale2pmu;
2957 }
2958
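/*
 * Probe the CPU ID register and select the matching PMU implementation:
 * implementer 0x41 is ARM Ltd (ARM11, ARM11 MPCore, Cortex-A8/A9) and
 * 0x69 is Intel (XScale v1/v2). If nothing matches, perf runs without
 * hardware counter support.
 */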
2959 static int __init
2960 init_hw_perf_events(void)
2961 {
2962 unsigned long cpuid = read_cpuid_id();
2963 unsigned long implementor = (cpuid & 0xFF000000) >> 24;
2964 unsigned long part_number = (cpuid & 0xFFF0);
2965
2966 /* ARM Ltd CPUs. */
2967 if (0x41 == implementor) {
2968 switch (part_number) {
2969 case 0xB360: /* ARM1136 */
2970 case 0xB560: /* ARM1156 */
2971 case 0xB760: /* ARM1176 */
2972 armpmu = armv6pmu_init();
2973 break;
2974 case 0xB020: /* ARM11mpcore */
2975 armpmu = armv6mpcore_pmu_init();
2976 break;
2977 case 0xC080: /* Cortex-A8 */
2978 armpmu = armv7_a8_pmu_init();
2979 break;
2980 case 0xC090: /* Cortex-A9 */
2981 armpmu = armv7_a9_pmu_init();
2982 break;
2983 }
2984 /* Intel CPUs [xscale]. */
2985 } else if (0x69 == implementor) {
2986 part_number = (cpuid >> 13) & 0x7;
2987 switch (part_number) {
2988 case 1:
2989 armpmu = xscale1pmu_init();
2990 break;
2991 case 2:
2992 armpmu = xscale2pmu_init();
2993 break;
2994 }
2995 }
2996
2997 if (armpmu) {
2998 pr_info("enabled with %s PMU driver, %d counters available\n",
2999 armpmu->name, armpmu->num_events);
3000 } else {
3001 pr_info("no hardware support available\n");
3002 }
3003
3004 perf_pmu_register(&pmu);
3005
3006 return 0;
3007 }
3008 arch_initcall(init_hw_perf_events);
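/*
 * Once registered, the PMU is driven entirely through the generic perf
 * interface; for example, "perf stat -e cycles,instructions <cmd>"
 * ends up programming the cycle counter and one event counter via the
 * enable/disable callbacks above.
 */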
3009
3010 /*
3011 * Callchain handling code.
3012 */
3013
3014 /*
3015 * The registers we're interested in are at the end of the variable
3016 * length saved register structure. The fp points at the end of this
3017 * structure so the address of this struct is:
3018 * (struct frame_tail *)(xxx->fp)-1
3019 *
3020 * This code has been adapted from the ARM OProfile support.
3021 */
3022 struct frame_tail {
3023 struct frame_tail *fp;
3024 unsigned long sp;
3025 unsigned long lr;
3026 } __attribute__((packed));
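/*
 * Since fp points just past this record, the tail for the current frame
 * is found with pointer arithmetic such as
 *
 *	(struct frame_tail *)regs->ARM_fp - 1
 *
 * which is why both user_backtrace() and perf_callchain_user() below
 * subtract one from the frame pointer they are given.
 */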
3027
3028 /*
3029 * Get the return address for a single stackframe and return a pointer to the
3030 * next frame tail.
3031 */
3032 static struct frame_tail *
3033 user_backtrace(struct frame_tail *tail,
3034 struct perf_callchain_entry *entry)
3035 {
3036 struct frame_tail buftail;
3037
3038 /* Also check accessibility of one struct frame_tail beyond */
3039 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
3040 return NULL;
3041 if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
3042 return NULL;
3043
3044 perf_callchain_store(entry, buftail.lr);
3045
3046 /*
3047 * Frame pointers should strictly progress back up the stack
3048 * (towards higher addresses).
3049 */
3050 if (tail >= buftail.fp)
3051 return NULL;
3052
3053 return buftail.fp - 1;
3054 }
3055
3056 void
3057 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
3058 {
3059 struct frame_tail *tail;
3060
3061
3062 tail = (struct frame_tail *)regs->ARM_fp - 1;
3063
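/*
 * Walk the user stack, stopping on a NULL or misaligned frame
 * pointer.
 */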
3064 while (tail && !((unsigned long)tail & 0x3))
3065 tail = user_backtrace(tail, entry);
3066 }
3067
3068 /*
3069 * Gets called by walk_stackframe() for every stackframe. This will be called
3070 * whilst unwinding the stackframe and is like a subroutine return, so we use
3071 * the PC.
3072 */
3073 static int
3074 callchain_trace(struct stackframe *fr,
3075 void *data)
3076 {
3077 struct perf_callchain_entry *entry = data;
3078 perf_callchain_store(entry, fr->pc);
3079 return 0;
3080 }
3081
3082 void
3083 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
3084 {
3085 struct stackframe fr;
3086
3087 fr.fp = regs->ARM_fp;
3088 fr.sp = regs->ARM_sp;
3089 fr.lr = regs->ARM_lr;
3090 fr.pc = regs->ARM_pc;
3091 walk_stackframe(&fr, callchain_trace, entry);
3092 }