#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
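
/*
 * Illustration of the encoding handled above: bits [7:0] of config select
 * the cache type, bits [15:8] the operation and bits [23:16] the result.
 * A PERF_TYPE_HW_CACHE config of 0x10000 therefore decodes to
 * PERF_COUNT_HW_CACHE_L1D / OP_READ / RESULT_MISS (L1 data read misses),
 * assuming the supplied cache_map provides a mapping for that combination.
 */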

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
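
/*
 * A sketch of how a PMU back-end typically wires this up (the my_pmu_* names
 * are hypothetical; only armpmu_map_event() is defined here):
 *
 *	static int my_pmu_map_event(struct perf_event *event)
 *	{
 *		return armpmu_map_event(event, &my_pmu_perf_map,
 *					&my_pmu_perf_cache_map,
 *					MY_PMU_RAW_EVENT_MASK);
 *	}
 *
 * with the driver's init code pointing armpmu->map_event at that wrapper.
 */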

int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
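
/*
 * Note on the write above: the counters count up and raise an interrupt on
 * overflow, so programming (u64)-left (truncated to the counter width)
 * makes the overflow fire after roughly "left" further events, at which
 * point the period is set again.
 */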

u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
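
/*
 * The cmpxchg loop above mirrors the pattern used by other perf back-ends:
 * if an interrupt updates prev_count between the read and the cmpxchg, the
 * calculation is retried. Masking the difference with max_period keeps the
 * delta correct across counter wrap-around.
 */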

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}
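
/*
 * Example of what the checks above catch: a group whose leader is a CPU PMU
 * event but whose sibling belongs to a different hardware PMU, or a group
 * with more hardware events than there are counters, fails validation here
 * and the whole group is rejected at event_init time.
 */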

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
	};
}

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	disable_percpu_irq(irq);
}

static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		}
	}
}

static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
				irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, cpu);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
					irq);
				return err;
			}

			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);

	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
		return NOTIFY_DONE;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return NOTIFY_DONE;

	if (pmu->reset)
		pmu->reset(pmu);
	else
		return NOTIFY_DONE;

	return NOTIFY_OK;
}

#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
		/* Fall through: restart the PMU after restoring the counters */
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;
	int cpu;
	struct pmu_hw_events __percpu *cpu_hw_events;

	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
	if (!cpu_hw_events)
		return -ENOMEM;

	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
	if (err)
		goto out_hw_events;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = cpu_pmu;
	}

	cpu_pmu->hw_events	= cpu_hw_events;
	cpu_pmu->request_irq	= cpu_pmu_request_irq;
	cpu_pmu->free_irq	= cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
				 cpu_pmu, 1);

	/* If no interrupts available, set the corresponding capability flag */
	if (!platform_get_irq(cpu_pmu->plat_device, 0))
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	/*
	 * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
	 * big.LITTLE). This is not an uncore PMU, and we have taken ctx
	 * sharing into account (e.g. with our pmu::filter_match callback and
	 * pmu::event_init group validation).
	 */
	cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;

	return 0;

out_unregister:
	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
out_hw_events:
	free_percpu(cpu_hw_events);
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
	free_percpu(cpu_pmu->hw_events);
}

/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu,
			     const struct pmu_probe_info *info)
{
	int cpu = get_cpu();
	unsigned int cpuid = read_cpuid_id();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	for (; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		ret = info->init(pmu);
		break;
	}

	put_cpu();
	return ret;
}

static int of_pmu_irq_cfg(struct arm_pmu *pmu)
{
	int *irqs, i = 0;
	bool using_spi = false;
	struct platform_device *pdev = pmu->plat_device;

	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
	if (!irqs)
		return -ENOMEM;

	do {
		struct device_node *dn;
		int cpu, irq;

		/* See if we have an affinity entry */
		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
		if (!dn)
			break;

		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
		irq = platform_get_irq(pdev, i);
		if (irq >= 0) {
			bool spi = !irq_is_percpu(irq);

			if (i > 0 && spi != using_spi) {
				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
					dn->name);
				kfree(irqs);
				return -EINVAL;
			}

			using_spi = spi;
		}

		/* Now look up the logical CPU number */
		for_each_possible_cpu(cpu) {
			struct device_node *cpu_dn;

			cpu_dn = of_cpu_device_node_get(cpu);
			of_node_put(cpu_dn);

			if (dn == cpu_dn)
				break;
		}

		if (cpu >= nr_cpu_ids) {
			pr_warn("Failed to find logical CPU for %s\n",
				dn->name);
			of_node_put(dn);
			cpumask_setall(&pmu->supported_cpus);
			break;
		}
		of_node_put(dn);

		/* For SPIs, we need to track the affinity per IRQ */
		if (using_spi) {
			if (i >= pdev->num_resources)
				break;

			irqs[i] = cpu;
		}

		/* Keep track of the CPUs containing this PMU type */
		cpumask_set_cpu(cpu, &pmu->supported_cpus);
		i++;
	} while (1);

	/* If we didn't manage to parse anything, claim to support all CPUs */
	if (cpumask_weight(&pmu->supported_cpus) == 0)
		cpumask_setall(&pmu->supported_cpus);

	/* If we matched up the IRQ affinities, use them to route the SPIs */
	if (using_spi && i == pdev->num_resources)
		pmu->irq_affinity = irqs;
	else
		kfree(irqs);

	return 0;
}
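
/*
 * For reference, a devicetree fragment exercising the parsing above might
 * look roughly like this (per Documentation/devicetree/bindings/arm/pmu.txt;
 * the phandles and interrupt numbers are illustrative):
 *
 *	pmu {
 *		compatible = "arm,cortex-a15-pmu";
 *		interrupts = <0 68 4>, <0 69 4>;
 *		interrupt-affinity = <&cpu0>, <&cpu1>;
 *	};
 */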

int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table)
{
	const struct of_device_id *of_id;
	const int (*init_fn)(struct arm_pmu *);
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	armpmu_init(pmu);

	pmu->plat_device = pdev;

	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
		init_fn = of_id->data;

		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
							   "secure-reg-access");

		/* arm64 systems boot only as non-secure */
		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
			pmu->secure_access = false;
		}

		ret = of_pmu_irq_cfg(pmu);
		if (!ret)
			ret = init_fn(pmu);
	} else {
		cpumask_setall(&pmu->supported_cpus);
		ret = probe_current_pmu(pmu, probe_table);
	}

	if (ret) {
		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
		goto out_free;
	}

	ret = cpu_pmu_init(pmu);
	if (ret)
		goto out_free;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
out_free:
	pr_info("%s: failed to register PMU devices!\n",
		of_node_full_name(node));
	kfree(pmu->irq_affinity);
	kfree(pmu);
	return ret;
}