#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>
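
/*
 * Decode a PERF_TYPE_HW_CACHE config value, which packs the cache type,
 * operation and result into its low three bytes, and look the triple up
 * in the per-PMU cache map.
 */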
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
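
/*
 * Program the counter so that it overflows once the remaining sample
 * period has elapsed. Returns 1 if the period was reloaded (the event
 * crossed a period boundary), 0 otherwise.
 */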
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
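
/*
 * Read the counter, fold the delta into the event count and adjust the
 * remaining period. The cmpxchg loop copes with a racing update of
 * prev_count.
 */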
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
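
/*
 * Start (or restart) an event on its counter. A fresh period is always
 * programmed before the counter is enabled; see the comment below.
 */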
static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}
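
/*
 * Remove an event from the PMU: stop it, free its counter and let the
 * back-end clear any per-counter state it keeps.
 */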
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}
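
/*
 * Allocate a counter for an event and, if PERF_EF_START is set, start it
 * counting. The whole operation runs with the PMU disabled so that the
 * hardware never sees a half-installed event.
 */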
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
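
/*
 * Check whether a single event could be scheduled alongside the events
 * already accounted to the fake PMU used by validate_group() below.
 */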
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}
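
/*
 * Verify that an event group can be scheduled as a whole by dry-running
 * the counter allocation against a fake PMU.
 */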
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}
313 | ||
051f1b13 | 314 | static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) |
0e25a5c9 | 315 | { |
bbd64559 SB |
316 | struct arm_pmu *armpmu; |
317 | struct platform_device *plat_device; | |
318 | struct arm_pmu_platdata *plat; | |
5f5092e7 WD |
319 | int ret; |
320 | u64 start_clock, finish_clock; | |
bbd64559 | 321 | |
5ebd9200 MR |
322 | /* |
323 | * we request the IRQ with a (possibly percpu) struct arm_pmu**, but | |
324 | * the handlers expect a struct arm_pmu*. The percpu_irq framework will | |
325 | * do any necessary shifting, we just need to perform the first | |
326 | * dereference. | |
327 | */ | |
328 | armpmu = *(void **)dev; | |
bbd64559 SB |
329 | plat_device = armpmu->plat_device; |
330 | plat = dev_get_platdata(&plat_device->dev); | |
0e25a5c9 | 331 | |
5f5092e7 | 332 | start_clock = sched_clock(); |
051f1b13 | 333 | if (plat && plat->handle_irq) |
5ebd9200 | 334 | ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq); |
051f1b13 | 335 | else |
5ebd9200 | 336 | ret = armpmu->handle_irq(irq, armpmu); |
5f5092e7 WD |
337 | finish_clock = sched_clock(); |
338 | ||
339 | perf_sample_event_took(finish_clock - start_clock); | |
340 | return ret; | |
0e25a5c9 RV |
341 | } |
342 | ||
0b390e21 | 343 | static void |
8a16b34e | 344 | armpmu_release_hardware(struct arm_pmu *armpmu) |
0b390e21 | 345 | { |
ed6f2a52 | 346 | armpmu->free_irq(armpmu); |
051f1b13 | 347 | pm_runtime_put_sync(&armpmu->plat_device->dev); |
0b390e21 WD |
348 | } |
349 | ||
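
/*
 * Claim the PMU hardware for the first active event: take a runtime PM
 * reference and request the interrupt(s) from the back-end. Undone by
 * armpmu_release_hardware() above.
 */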
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}
388 | ||
1b8873a0 JI |
389 | static int |
390 | __hw_perf_event_init(struct perf_event *event) | |
391 | { | |
8a16b34e | 392 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
1b8873a0 | 393 | struct hw_perf_event *hwc = &event->hw; |
9dcbf466 | 394 | int mapping; |
1b8873a0 | 395 | |
e1f431b5 | 396 | mapping = armpmu->map_event(event); |
1b8873a0 JI |
397 | |
398 | if (mapping < 0) { | |
399 | pr_debug("event %x:%llx not supported\n", event->attr.type, | |
400 | event->attr.config); | |
401 | return mapping; | |
402 | } | |
403 | ||
05d22fde WD |
404 | /* |
405 | * We don't assign an index until we actually place the event onto | |
406 | * hardware. Use -1 to signify that we haven't decided where to put it | |
407 | * yet. For SMP systems, each core has it's own PMU so we can't do any | |
408 | * clever allocation or constraints checking at this point. | |
409 | */ | |
410 | hwc->idx = -1; | |
411 | hwc->config_base = 0; | |
412 | hwc->config = 0; | |
413 | hwc->event_base = 0; | |
414 | ||
1b8873a0 JI |
415 | /* |
416 | * Check whether we need to exclude the counter from certain modes. | |
1b8873a0 | 417 | */ |
05d22fde WD |
418 | if ((!armpmu->set_event_filter || |
419 | armpmu->set_event_filter(hwc, &event->attr)) && | |
420 | event_requires_mode_exclusion(&event->attr)) { | |
1b8873a0 JI |
421 | pr_debug("ARM performance counters do not support " |
422 | "mode exclusion\n"); | |
fdeb8e35 | 423 | return -EOPNOTSUPP; |
1b8873a0 JI |
424 | } |
425 | ||
426 | /* | |
05d22fde | 427 | * Store the event encoding into the config_base field. |
1b8873a0 | 428 | */ |
05d22fde | 429 | hwc->config_base |= (unsigned long)mapping; |
1b8873a0 | 430 | |
edcb4d3c | 431 | if (!is_sampling_event(event)) { |
57273471 WD |
432 | /* |
433 | * For non-sampling runs, limit the sample_period to half | |
434 | * of the counter width. That way, the new counter value | |
435 | * is far less likely to overtake the previous one unless | |
436 | * you have some serious IRQ latency issues. | |
437 | */ | |
438 | hwc->sample_period = armpmu->max_period >> 1; | |
1b8873a0 | 439 | hwc->last_period = hwc->sample_period; |
e7850595 | 440 | local64_set(&hwc->period_left, hwc->sample_period); |
1b8873a0 JI |
441 | } |
442 | ||
1b8873a0 | 443 | if (event->group_leader != event) { |
e595ede6 | 444 | if (validate_group(event) != 0) |
1b8873a0 JI |
445 | return -EINVAL; |
446 | } | |
447 | ||
9dcbf466 | 448 | return 0; |
1b8873a0 JI |
449 | } |
450 | ||
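
/*
 * pmu::event_init callback. The first active event reserves the PMU
 * hardware; hw_perf_event_destroy() releases it again once the last
 * event has gone away.
 */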
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	armpmu->stop(armpmu);
}
501 | ||
bf7c5449 | 502 | #ifdef CONFIG_PM |
7be2958e JH |
503 | static int armpmu_runtime_resume(struct device *dev) |
504 | { | |
505 | struct arm_pmu_platdata *plat = dev_get_platdata(dev); | |
506 | ||
507 | if (plat && plat->runtime_resume) | |
508 | return plat->runtime_resume(dev); | |
509 | ||
510 | return 0; | |
511 | } | |
512 | ||
513 | static int armpmu_runtime_suspend(struct device *dev) | |
514 | { | |
515 | struct arm_pmu_platdata *plat = dev_get_platdata(dev); | |
516 | ||
517 | if (plat && plat->runtime_suspend) | |
518 | return plat->runtime_suspend(dev); | |
519 | ||
520 | return 0; | |
521 | } | |
522 | #endif | |
523 | ||
6dbc0029 WD |
524 | const struct dev_pm_ops armpmu_dev_pm_ops = { |
525 | SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL) | |
526 | }; | |
527 | ||
44d6b1fc | 528 | static void armpmu_init(struct arm_pmu *armpmu) |
03b7898d MR |
529 | { |
530 | atomic_set(&armpmu->active_events, 0); | |
531 | mutex_init(&armpmu->reserve_mutex); | |
8a16b34e MR |
532 | |
533 | armpmu->pmu = (struct pmu) { | |
534 | .pmu_enable = armpmu_enable, | |
535 | .pmu_disable = armpmu_disable, | |
536 | .event_init = armpmu_event_init, | |
537 | .add = armpmu_add, | |
538 | .del = armpmu_del, | |
539 | .start = armpmu_start, | |
540 | .stop = armpmu_stop, | |
541 | .read = armpmu_read, | |
542 | }; | |
543 | } | |
544 | ||
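
/*
 * Register an ARM PMU with the perf core. The caller must have filled in
 * the arm_pmu structure, including plat_device, before calling this.
 */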
int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
		armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}