/*
 * Performance event support framework for SuperH hardware counters.
 *
 *  Copyright (C) 2009  Paul Mundt
 *
 * Heavily based on the x86 and PowerPC implementations.
 *
 * x86:
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
 *  Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <linux/export.h>
#include <asm/processor.h>

struct cpu_hw_events {
	struct perf_event	*events[MAX_HWEVENTS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct sh_pmu *sh_pmu __read_mostly;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Stub these out for now, do something more profound later.
 */
int reserve_pmc_hardware(void)
{
	return 0;
}

void release_pmc_hardware(void)
{
}

static inline int sh_pmu_initialized(void)
{
	return !!sh_pmu;
}

const char *perf_pmu_name(void)
{
	if (!sh_pmu)
		return NULL;

	return sh_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	if (!sh_pmu)
		return 0;

	return sh_pmu->num_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
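	/*
	 * Fast path: just drop one reference while more than one user
	 * remains. atomic_add_unless() refuses the decrement only when
	 * the count is 1, in which case the final decrement and the
	 * hardware release are done under pmc_reserve_mutex so we can't
	 * race with a concurrent reserve in __hw_perf_event_init().
	 */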
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static int hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	if (!sh_pmu->cache_events)
		return -EINVAL;

	/* unpack config: type | (op << 8) | (result << 16), per the perf ABI */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

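	/*
	 * Cache map convention: 0 means the combination is not counted
	 * by this hardware, -1 that it is not a sensible combination at
	 * all, hence the differing return values below.
	 */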
	ev = (*sh_pmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*evp = ev;
	return 0;
}

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int config = -1;
	int err;

	if (!sh_pmu_initialized())
		return -ENODEV;

	/*
	 * See if we need to reserve the counter.
	 *
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}

	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		config = attr->config & sh_pmu->raw_event_mask;
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(attr->config, &config);
		if (err)
			return err;
		break;
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sh_pmu->max_events)
			return -EINVAL;

		config = sh_pmu->event_map(attr->config);
		break;
	}

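	/* config is still -1 if the event map has no encoding for this event */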
	if (config == -1)
		return -EINVAL;

	hwc->config |= config;

	return 0;
}

static void sh_perf_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count;
	s64 delta;
	int shift = 0;	/* counters are read at full width, so no sign-extension fixup */

	/*
	 * Depending on the counter configuration, they may or may not
	 * be chained, in which case the previous counter value can be
	 * updated underneath us if the lower-half overflows.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically.
	 *
	 * As there is no interrupt associated with the overflow events,
	 * this is the simplest approach for maintaining consistency.
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = sh_pmu->read(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

static void sh_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sh_pmu->disable(hwc, idx);
		cpuc->events[idx] = NULL;
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		sh_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

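/*
 * ->start()/->stop() pairing: PERF_HES_STOPPED tracks whether the
 * counter is currently disabled, PERF_HES_UPTODATE whether its count
 * has been folded into event->count by sh_perf_event_update().
 */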
static void sh_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	cpuc->events[idx] = event;
	event->hw.state = 0;
	sh_pmu->enable(hwc, idx);
}

static void sh_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	sh_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static int sh_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret = -EAGAIN;

	perf_pmu_disable(event->pmu);

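	/*
	 * Try the counter index this event last used; if that slot is
	 * already taken, fall back to scanning used_mask for a free one.
	 */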
	if (__test_and_set_bit(idx, cpuc->used_mask)) {
		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
		if (idx == sh_pmu->num_events)
			goto out;

		__set_bit(idx, cpuc->used_mask);
		hwc->idx = idx;
	}

	sh_pmu->disable(hwc, idx);

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		sh_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void sh_pmu_read(struct perf_event *event)
{
	sh_perf_event_update(event, &event->hw, event->hw.idx);
}

static int sh_pmu_event_init(struct perf_event *event)
{
	int err;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_HARDWARE:
		err = __hw_perf_event_init(event);
		break;

	default:
		return -ENOENT;
	}

	if (unlikely(err)) {
		if (event->destroy)
			event->destroy(event);
	}

	return err;
}

static void sh_pmu_enable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->enable_all();
}

static void sh_pmu_disable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->disable_all();
}

static struct pmu pmu = {
	.pmu_enable	= sh_pmu_enable,
	.pmu_disable	= sh_pmu_disable,
	.event_init	= sh_pmu_event_init,
	.add		= sh_pmu_add,
	.del		= sh_pmu_del,
	.start		= sh_pmu_start,
	.stop		= sh_pmu_stop,
	.read		= sh_pmu_read,
};

static void sh_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
}

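/*
 * Clear the per-CPU bookkeeping on CPU_UP_PREPARE so a CPU coming
 * (back) online does not inherit stale used_mask/event state.
 */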
static int
sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		sh_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

int register_sh_pmu(struct sh_pmu *_pmu)
{
	if (sh_pmu)
		return -EBUSY;
	sh_pmu = _pmu;

	pr_info("Performance Events: %s support registered\n", _pmu->name);

	/*
	 * All of the on-chip counters are "limited", in that they have
	 * no interrupts, and are therefore unable to do sampling without
	 * further work and timer assistance.
	 */
	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	WARN_ON(_pmu->num_events > MAX_HWEVENTS);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(sh_pmu_notifier);
	return 0;
}
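
/*
 * For reference, an illustrative sketch (not part of this file) of how
 * a CPU-specific driver hooks in: it fills in an sh_pmu with its
 * counter callbacks and registers it at boot. All sh_example_* names
 * below are hypothetical; the SH-4A support in
 * arch/sh/kernel/cpu/sh4a/perf_event.c is a real in-tree user.
 *
 *	static struct sh_pmu sh_example_pmu = {
 *		.name		= "example",
 *		.num_events	= 2,
 *		.event_map	= sh_example_event_map,
 *		.max_events	= ARRAY_SIZE(sh_example_event_map_table),
 *		.raw_event_mask	= 0xff,
 *		.cache_events	= &sh_example_cache_events,
 *		.read		= sh_example_read,
 *		.disable	= sh_example_disable,
 *		.enable		= sh_example_enable,
 *		.disable_all	= sh_example_disable_all,
 *		.enable_all	= sh_example_enable_all,
 *	};
 *
 *	static int __init sh_example_pmu_init(void)
 *	{
 *		return register_sh_pmu(&sh_example_pmu);
 *	}
 *	early_initcall(sh_example_pmu_init);
 */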