arm64: perf: move to shared arm_pmu framework
arch/arm64/kernel/perf_event.c
/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types.
 */
enum armv8_pmuv3_perf_types {
        /* Required events. */
        ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00,
        ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03,
        ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04,
        ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
        ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11,
        ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12,

        /* At least one of the following is required. */
        ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08,
        ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B,

        /* Common architectural events. */
        ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06,
        ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07,
        ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09,
        ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A,
        ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B,
        ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C,
        ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D,
        ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E,
        ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
        ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C,

        /* Common microarchitectural events. */
        ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01,
        ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02,
        ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05,
        ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13,
        ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14,
        ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15,
        ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16,
        ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17,
        ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18,
        ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
        ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
        ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
};

/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
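
/*
 * Any generic event not listed in the map above is left as
 * HW_OP_UNSUPPORTED by PERF_MAP_ALL_UNSUPPORTED, so armpmu_map_event()
 * rejects it rather than programming a bogus event number. For example,
 * a userspace request for PERF_COUNT_HW_CACHE_MISSES ("cache-misses")
 * resolves to ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL through this table.
 */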

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0      1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
        (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV8_MAX_COUNTERS      32
#define ARMV8_COUNTER_MASK      (ARMV8_MAX_COUNTERS - 1)

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
        (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
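
/*
 * Example: perf index 1 (ARMV8_IDX_COUNTER0) maps to hardware event
 * counter 0, so ARMV8_IDX_TO_COUNTER(1) == 0 is the value written to
 * PMSELR_EL0 and used in the PMCNTEN{SET,CLR}/PMINTEN{SET,CLR} bit masks.
 * The cycle counter (perf index 0) wraps around to 31, which is the bit
 * position reserved for the cycle counter in those registers.
 */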

/*
 * Per-CPU PMCR: config reg
 */
#define ARMV8_PMCR_E            (1 << 0) /* Enable all counters */
#define ARMV8_PMCR_P            (1 << 1) /* Reset all counters */
#define ARMV8_PMCR_C            (1 << 2) /* Cycle counter reset */
#define ARMV8_PMCR_D            (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMCR_X            (1 << 4) /* Export to ETM */
#define ARMV8_PMCR_DP           (1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV8_PMCR_N_SHIFT      11       /* Number of counters supported */
#define ARMV8_PMCR_N_MASK       0x1f
#define ARMV8_PMCR_MASK         0x3f     /* Mask for writable bits */

/*
 * PMOVSR: counters overflow flag status reg
 */
#define ARMV8_OVSR_MASK         0xffffffff /* Mask for writable bits */
#define ARMV8_OVERFLOWED_MASK   ARMV8_OVSR_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV8_EVTYPE_MASK       0xc80003ff /* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT      0x3ff      /* Mask for EVENT bits */

/*
 * Event filters for PMUv3
 */
#define ARMV8_EXCLUDE_EL1       (1 << 31)
#define ARMV8_EXCLUDE_EL0       (1 << 30)
#define ARMV8_INCLUDE_EL2       (1 << 27)

static inline u32 armv8pmu_pmcr_read(void)
{
        u32 val;
        asm volatile("mrs %0, pmcr_el0" : "=r" (val));
        return val;
}

static inline void armv8pmu_pmcr_write(u32 val)
{
        val &= ARMV8_PMCR_MASK;
        isb();
        asm volatile("msr pmcr_el0, %0" :: "r" (val));
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
        return pmovsr & ARMV8_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
        return idx >= ARMV8_IDX_CYCLE_COUNTER &&
                idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
        return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline int armv8pmu_select_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmselr_el0, %0" :: "r" (counter));
        isb();

        return idx;
}
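
/*
 * The event counters are accessed indirectly: writing a counter number to
 * PMSELR_EL0 selects which counter the subsequent PMXEVCNTR_EL0 and
 * PMXEVTYPER_EL0 accesses operate on. The isb() after the select ensures
 * the new selection has taken effect before the following register access.
 */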

static inline u32 armv8pmu_read_counter(struct perf_event *event)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 value = 0;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u reading wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
                asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
        else if (armv8pmu_select_counter(idx) == idx)
                asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));

        return value;
}

static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
                asm volatile("msr pmccntr_el0, %0" :: "r" (value));
        else if (armv8pmu_select_counter(idx) == idx)
                asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
        if (armv8pmu_select_counter(idx) == idx) {
                val &= ARMV8_EVTYPE_MASK;
                asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
        }
}

static inline int armv8pmu_enable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_disable_counter(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_enable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
        return idx;
}

static inline int armv8pmu_disable_intens(int idx)
{
        u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
        isb();
        /* Clear the overflow flag in case an interrupt is pending. */
        asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
        isb();

        return idx;
}

static inline u32 armv8pmu_getreset_flags(void)
{
        u32 value;

        /* Read */
        asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));

        /* Write to clear flags */
        value &= ARMV8_OVSR_MASK;
        asm volatile("msr pmovsclr_el0, %0" :: "r" (value));

        return value;
}

static void armv8pmu_enable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters).
         */
        armv8pmu_write_evtype(idx, hwc->config_base);

        /*
         * Enable interrupt for this counter
         */
        armv8pmu_enable_intens(idx);

        /*
         * Enable counter
         */
        armv8pmu_enable_counter(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
        unsigned long flags;
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        /*
         * Disable counter and interrupt
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Disable counter
         */
        armv8pmu_disable_counter(idx);

        /*
         * Disable interrupt for this counter
         */
        armv8pmu_disable_intens(idx);

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
        u32 pmovsr;
        struct perf_sample_data data;
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        /*
         * Get and reset the IRQ flags
         */
        pmovsr = armv8pmu_getreset_flags();

        /*
         * Did an overflow occur?
         */
        if (!armv8pmu_has_overflowed(pmovsr))
                return IRQ_NONE;

        /*
         * Handle the counter(s) overflow(s)
         */
        regs = get_irq_regs();

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Enable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Disable all counters */
        armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct perf_event *event)
{
        int idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;

        /* Always place a cycle-counting event on the dedicated cycle counter. */
        if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
                if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV8_IDX_CYCLE_COUNTER;
        }

        /*
         * For anything other than a cycle-counting event, try to use
         * the event counters.
         */
        for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
                if (!test_and_set_bit(idx, cpuc->used_mask))
                        return idx;
        }

        /* The counters are all in use. */
        return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
                                     struct perf_event_attr *attr)
{
        unsigned long config_base = 0;

        if (attr->exclude_idle)
                return -EPERM;
        if (attr->exclude_user)
                config_base |= ARMV8_EXCLUDE_EL0;
        if (attr->exclude_kernel)
                config_base |= ARMV8_EXCLUDE_EL1;
        if (!attr->exclude_hv)
                config_base |= ARMV8_INCLUDE_EL2;

        /*
         * Install the filter into config_base as this is used to
         * construct the event type.
         */
        event->config_base = config_base;

        return 0;
}

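/*
 * Illustration only (not part of this driver): a tool asking for
 * kernel-only cycle counting sets attr.exclude_user when calling
 * perf_event_open(). The code above turns that into ARMV8_EXCLUDE_EL0 in
 * config_base, which armv8pmu_write_evtype() later writes into
 * PMXEVTYPER_EL0:
 *
 *      struct perf_event_attr attr = {
 *              .type           = PERF_TYPE_HARDWARE,
 *              .config         = PERF_COUNT_HW_CPU_CYCLES,
 *              .exclude_user   = 1,    // count at EL1 only
 *      };
 */
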
static void armv8pmu_reset(void *info)
{
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
        u32 idx, nb_cnt = cpu_pmu->num_events;

        /* The counter and interrupt enable registers are unknown at reset. */
        for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
                armv8pmu_disable_counter(idx);
                armv8pmu_disable_intens(idx);
        }

        /* Initialize & Reset PMNC: C and P bits. */
        armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);

        /* Disable access from userspace. */
        asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv8_pmuv3_perf_map,
                                &armv8_pmuv3_perf_cache_map,
                                ARMV8_EVTYPE_EVENT);
}

static void armv8pmu_read_num_pmnc_events(void *info)
{
        int *nb_cnt = info;

        /* Read the number of CNTx counters supported from PMNC. */
        *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;

        /* Add the CPU cycles counter. */
        *nb_cnt += 1;
}

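/*
 * Example: on a core whose PMCR_EL0.N field reads as 6, there are six
 * programmable event counters; adding the fixed cycle counter gives
 * num_events = 7, i.e. perf indices 0 (ARMV8_IDX_CYCLE_COUNTER) through
 * 6 (ARMV8_IDX_COUNTER_LAST).
 */
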
static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
{
        return smp_call_function_any(&arm_pmu->supported_cpus,
                                     armv8pmu_read_num_pmnc_events,
                                     &arm_pmu->num_events, 1);
}

static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->handle_irq = armv8pmu_handle_irq;
        cpu_pmu->enable = armv8pmu_enable_event;
        cpu_pmu->disable = armv8pmu_disable_event;
        cpu_pmu->read_counter = armv8pmu_read_counter;
        cpu_pmu->write_counter = armv8pmu_write_counter;
        cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
        cpu_pmu->start = armv8pmu_start;
        cpu_pmu->stop = armv8pmu_stop;
        cpu_pmu->reset = armv8pmu_reset;
        cpu_pmu->max_period = (1LLU << 32) - 1;
        cpu_pmu->name = "armv8_pmuv3";
        cpu_pmu->map_event = armv8_pmuv3_map_event;
        cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
        return armv8pmu_probe_num_events(cpu_pmu);
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
        {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
        {},
};

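/*
 * A minimal sketch of a matching devicetree node (the interrupt numbers
 * are invented for the example; real values are per-platform and follow
 * the devicetree PMU binding, typically one SPI per CPU):
 *
 *      pmu {
 *              compatible = "arm,armv8-pmuv3";
 *              interrupts = <0 60 4>, <0 61 4>, <0 62 4>, <0 63 4>;
 *      };
 */
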
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
        .driver = {
                .name = "armv8-pmu",
                .of_match_table = armv8_pmu_of_device_ids,
        },
        .probe = armv8_pmu_device_probe,
};

static int __init register_armv8_pmu_driver(void)
{
        return platform_driver_register(&armv8_pmu_driver);
}
device_initcall(register_armv8_pmu_driver);