/*
 * Performance event support framework for SuperH hardware counters.
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * Heavily based on the x86 and PowerPC implementations.
 *
 * x86:
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
 *  Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/processor.h>

struct cpu_hw_events {
	struct perf_event *events[MAX_HWEVENTS];
	unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct sh_pmu *sh_pmu __read_mostly;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Stub these out for now, do something more profound later.
 */
int reserve_pmc_hardware(void)
{
	return 0;
}

void release_pmc_hardware(void)
{
}

static inline int sh_pmu_initialized(void)
{
	return !!sh_pmu;
}

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static int hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	if (!sh_pmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*sh_pmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*evp = ev;
	return 0;
}
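
/*
 * Illustrative sketch (not part of this file, and not referenced by it):
 * the [type][op][result] table a CPU-specific backend would point
 * sh_pmu->cache_events at.  Per the dispatcher above, 0 means the
 * combination is not counted (-EOPNOTSUPP), -1 means it is not possible
 * (-EINVAL), and any other value is the raw hardware event code.  The
 * event codes below are invented for illustration only.
 */
static const int example_cache_events[PERF_COUNT_HW_CACHE_MAX]
				     [PERF_COUNT_HW_CACHE_OP_MAX]
				     [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[PERF_COUNT_HW_CACHE_L1D] = {
		[PERF_COUNT_HW_CACHE_OP_READ] = {
			[PERF_COUNT_HW_CACHE_RESULT_ACCESS] = 0x0021, /* hypothetical code */
			[PERF_COUNT_HW_CACHE_RESULT_MISS] = 0x0022,   /* hypothetical code */
		},
		[PERF_COUNT_HW_CACHE_OP_PREFETCH] = {
			[PERF_COUNT_HW_CACHE_RESULT_ACCESS] = 0,  /* not counted */
			[PERF_COUNT_HW_CACHE_RESULT_MISS] = -1,   /* not possible */
		},
	},
};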

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int config;
	int err;

	if (!sh_pmu_initialized())
		return -ENODEV;

	/*
	 * All of the on-chip counters are "limited", in that they have
	 * no interrupts, and are therefore unable to do sampling without
	 * further work and timer assistance.
	 */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * See if we need to reserve the counter.
	 *
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}

	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		config = attr->config & sh_pmu->raw_event_mask;
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(attr->config, &config);
		if (err)
			return err;
		break;
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sh_pmu->max_events)
			return -EINVAL;

		config = sh_pmu->event_map(attr->config);
		break;
	default:
		return -EINVAL;
	}

	if (config == -1)
		return -EINVAL;

	hwc->config |= config;

	return 0;
}

static void sh_perf_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count;
	s64 delta;
	int shift = 0;

	/*
	 * Depending on the counter configuration, they may or may not
	 * be chained, in which case the previous counter value can be
	 * updated underneath us if the lower-half overflows.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically.
	 *
	 * As there is no interrupt associated with the overflow events,
	 * this is the simplest approach for maintaining consistency.
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = sh_pmu->read(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
}
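
/*
 * Illustrative sketch (a standalone userspace snippet, not part of the
 * driver): the shift trick above, worked for a hypothetical counter that
 * is only 48 bits wide.  This file uses shift = 0, i.e. the raw counts
 * are treated as full 64-bit values.
 *
 * #include <stdint.h>
 * #include <assert.h>
 *
 * static int64_t counter_delta(uint64_t prev, uint64_t cur, int width)
 * {
 *	int shift = 64 - width;
 *
 *	// Shift the hardware width up to bit 63, subtract, then
 *	// sign-extend back down: a wrapped counter yields a small
 *	// positive delta rather than a huge bogus one.
 *	return ((int64_t)(cur << shift) - (int64_t)(prev << shift)) >> shift;
 * }
 *
 * int main(void)
 * {
 *	// 48-bit counter wraps from ...fff0 to 0x10: 32 events elapsed
 *	assert(counter_delta(0x0000fffffffffff0ULL, 0x10ULL, 48) == 32);
 *	return 0;
 * }
 */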

static void sh_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	clear_bit(idx, cpuc->active_mask);
	sh_pmu->disable(hwc, idx);

	barrier();

	sh_perf_event_update(event, &event->hw, idx);

	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static int sh_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (test_and_set_bit(idx, cpuc->used_mask)) {
		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
		if (idx == sh_pmu->num_events)
			return -EAGAIN;

		set_bit(idx, cpuc->used_mask);
		hwc->idx = idx;
	}

	sh_pmu->disable(hwc, idx);

	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	sh_pmu->enable(hwc, idx);

	perf_event_update_userpage(event);

	return 0;
}

static void sh_pmu_read(struct perf_event *event)
{
	sh_perf_event_update(event, &event->hw, event->hw.idx);
}

static const struct pmu pmu = {
	.enable = sh_pmu_enable,
	.disable = sh_pmu_disable,
	.read = sh_pmu_read,
};

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err = __hw_perf_event_init(event);
	if (unlikely(err)) {
		if (event->destroy)
			event->destroy(event);
		return ERR_PTR(err);
	}

	return &pmu;
}

void hw_perf_event_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
}

void hw_perf_enable(void)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->enable_all();
}

void hw_perf_disable(void)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->disable_all();
}

int register_sh_pmu(struct sh_pmu *pmu)
{
	if (sh_pmu)
		return -EBUSY;
	sh_pmu = pmu;

	pr_info("Performance Events: %s support registered\n", pmu->name);

	WARN_ON(pmu->num_events > MAX_HWEVENTS);

	return 0;
}
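
/*
 * Illustrative sketch (hypothetical backend, not part of this file): the
 * shape of a CPU-specific driver that feeds this framework.  A real
 * backend would program the hardware performance counter registers in
 * these callbacks; here they are stubs, and every "example_*" name and
 * event code is invented for illustration.  Only the struct sh_pmu
 * members actually used by the framework above are filled in.
 */
static const int example_general_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = 0x0023,
	[PERF_COUNT_HW_INSTRUCTIONS] = 0x0024,
};

static int example_event_map(int event)
{
	return example_general_events[event];
}

static u64 example_pmu_read(int idx)
{
	return 0;	/* would read back the idx'th hardware counter */
}

static void example_pmu_enable(struct hw_perf_event *hwc, int idx)
{
	/* would program hwc->config into counter idx and start it */
}

static void example_pmu_disable(struct hw_perf_event *hwc, int idx)
{
	/* would stop counter idx */
}

static void example_pmu_enable_all(void)
{
	/* would start all counters */
}

static void example_pmu_disable_all(void)
{
	/* would stop all counters */
}

static struct sh_pmu example_pmu = {
	.name = "example",
	.num_events = 2,
	.event_map = example_event_map,
	.max_events = ARRAY_SIZE(example_general_events),
	.raw_event_mask = 0xff,
	/* .cache_events left NULL: PERF_TYPE_HW_CACHE then returns -EINVAL */
	.read = example_pmu_read,
	.enable = example_pmu_enable,
	.disable = example_pmu_disable,
	.enable_all = example_pmu_enable_all,
	.disable_all = example_pmu_disable_all,
};

static int __init example_pmu_init(void)
{
	return register_sh_pmu(&example_pmu);
}
arch_initcall(example_pmu_init);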