/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>

struct cpu_hw_counters {
        int n_counters;
        int n_percpu;
        int disabled;
        int n_added;
        struct perf_counter *counter[MAX_HWCOUNTERS];
        unsigned int events[MAX_HWCOUNTERS];
        u64 mmcr[3];
        u8 pmcs_enabled;
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
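/*
 * cpuhw->mmcr[] caches the register images computed by
 * ppmu->compute_mmcr(): hw_perf_restore() writes mmcr[0] to MMCR0,
 * mmcr[1] to MMCR1 and mmcr[2] to MMCRA when it (re)enables the PMU.
 */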

struct power_pmu *ppmu;
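/*
 * ppmu describes the PMU of the processor we are running on; it is
 * selected from the PVR in init_perf_counters() below.
 */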

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
        unsigned long val;

        switch (idx) {
        case 1:
                val = mfspr(SPRN_PMC1);
                break;
        case 2:
                val = mfspr(SPRN_PMC2);
                break;
        case 3:
                val = mfspr(SPRN_PMC3);
                break;
        case 4:
                val = mfspr(SPRN_PMC4);
                break;
        case 5:
                val = mfspr(SPRN_PMC5);
                break;
        case 6:
                val = mfspr(SPRN_PMC6);
                break;
        case 7:
                val = mfspr(SPRN_PMC7);
                break;
        case 8:
                val = mfspr(SPRN_PMC8);
                break;
        default:
                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
                val = 0;
        }
        return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
        switch (idx) {
        case 1:
                mtspr(SPRN_PMC1, val);
                break;
        case 2:
                mtspr(SPRN_PMC2, val);
                break;
        case 3:
                mtspr(SPRN_PMC3, val);
                break;
        case 4:
                mtspr(SPRN_PMC4, val);
                break;
        case 5:
                mtspr(SPRN_PMC5, val);
                break;
        case 6:
                mtspr(SPRN_PMC6, val);
                break;
        case 7:
                mtspr(SPRN_PMC7, val);
                break;
        case 8:
                mtspr(SPRN_PMC8, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
        }
}
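/*
 * Note: read_pmc()/write_pmc() are only expected to be called with idx
 * values from 1 to ppmu->n_counter, so SPRN_PMC7/SPRN_PMC8 should only
 * be touched on processors that actually have eight counters; the
 * default cases above catch anything out of range.
 */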

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
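/*
 * How the check works (a sketch; the exact field layout is defined by
 * each struct power_pmu back end):
 *
 * ppmu->get_constraint() returns a (mask, value) pair for each event,
 * dividing a 64-bit constraint word into fields.  Fields covered by the
 * mask act as "select" fields: two events conflict if they need
 * different values in the same field.  Fields not covered by the mask
 * act as resource counters: each event adds into such a field, with
 * ppmu->add_fields providing the carry propagation and ppmu->test_adder
 * making an over-committed field spill into a mask-covered bit so the
 * comparison below fails.
 *
 * power_check_constraints() first tries the event codes as given; if
 * that fails it asks ppmu->get_alternatives() for equivalent codes for
 * each event and does a depth-first search with backtracking (via
 * choice[], svalues[] and smasks[]) over all combinations.
 */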
static int power_check_constraints(unsigned int event[], int n_ev)
{
        u64 mask, value, nv;
        unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
        int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
        int i, j;
        u64 addf = ppmu->add_fields;
        u64 tadd = ppmu->test_adder;

        if (n_ev > ppmu->n_counter)
                return -1;

        /* First see if the events will go on as-is */
        for (i = 0; i < n_ev; ++i) {
                alternatives[i][0] = event[i];
                if (ppmu->get_constraint(event[i], &amasks[i][0],
                                         &avalues[i][0]))
                        return -1;
                choice[i] = 0;
        }
        value = mask = 0;
        for (i = 0; i < n_ev; ++i) {
                nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
                if ((((nv + tadd) ^ value) & mask) != 0 ||
                    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
                        break;
                value = nv;
                mask |= amasks[i][0];
        }
        if (i == n_ev)
                return 0;       /* all OK */

        /* doesn't work, gather alternatives... */
        if (!ppmu->get_alternatives)
                return -1;
        for (i = 0; i < n_ev; ++i) {
                n_alt[i] = ppmu->get_alternatives(event[i], alternatives[i]);
                for (j = 1; j < n_alt[i]; ++j)
                        ppmu->get_constraint(alternatives[i][j],
                                             &amasks[i][j], &avalues[i][j]);
        }

        /* enumerate all possibilities and see if any will work */
        i = 0;
        j = -1;
        value = mask = nv = 0;
        while (i < n_ev) {
                if (j >= 0) {
                        /* we're backtracking, restore context */
                        value = svalues[i];
                        mask = smasks[i];
                        j = choice[i];
                }
                /*
                 * See if any alternative k for event i,
                 * where k > j, will satisfy the constraints.
                 */
                while (++j < n_alt[i]) {
                        nv = (value | avalues[i][j]) +
                                (value & avalues[i][j] & addf);
                        if ((((nv + tadd) ^ value) & mask) == 0 &&
                            (((nv + tadd) ^ avalues[i][j])
                             & amasks[i][j]) == 0)
                                break;
                }
                if (j >= n_alt[i]) {
                        /*
                         * No feasible alternative, backtrack
                         * to event i-1 and continue enumerating its
                         * alternatives from where we got up to.
                         */
                        if (--i < 0)
                                return -1;
                } else {
                        /*
                         * Found a feasible alternative for event i,
                         * remember where we got up to with this event,
                         * go on to the next event, and start with
                         * the first alternative for it.
                         */
                        choice[i] = j;
                        svalues[i] = value;
                        smasks[i] = mask;
                        value = nv;
                        mask |= amasks[i][j];
                        ++i;
                        j = -1;
                }
        }

        /* OK, we have a feasible combination, tell the caller the solution */
        for (i = 0; i < n_ev; ++i)
                event[i] = alternatives[i][choice[i]];
        return 0;
}

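/*
 * Fold the current hardware count for a counter into counter->count.
 * prev_count is updated with cmpxchg so that, if a PMU interrupt (which
 * behaves like an NMI here) updates the counter between our read of
 * prev_count and our read of the PMC, we retry with the new baseline
 * rather than accounting the same events twice.
 */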
static void power_perf_read(struct perf_counter *counter)
{
        long val, delta, prev;

        if (!counter->hw.idx)
                return;
        /*
         * Performance monitor interrupts come even when interrupts
         * are soft-disabled, as long as interrupts are hard-enabled.
         * Therefore we treat them like NMIs.
         */
        do {
                prev = atomic64_read(&counter->hw.prev_count);
                barrier();
                val = read_pmc(counter->hw.idx);
        } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

        /* The counters are only 32 bits wide */
        delta = (val - prev) & 0xfffffffful;
        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &counter->hw.period_left);
}

/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
u64 hw_perf_save_disable(void)
{
        struct cpu_hw_counters *cpuhw;
        unsigned long ret;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = &__get_cpu_var(cpu_hw_counters);

        ret = cpuhw->disabled;
        if (!ret) {
                cpuhw->disabled = 1;
                cpuhw->n_added = 0;

                /*
                 * Check if we ever enabled the PMU on this cpu.
                 */
                if (!cpuhw->pmcs_enabled) {
                        if (ppc_md.enable_pmcs)
                                ppc_md.enable_pmcs();
                        cpuhw->pmcs_enabled = 1;
                }

                /*
                 * Set the 'freeze counters' bit.
                 * The barrier is to make sure the mtspr has been
                 * executed and the PMU has frozen the counters
                 * before we return.
                 */
                mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);
                mb();
        }
        local_irq_restore(flags);
        return ret;
}

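/*
 * hw_perf_save_disable() returns the previous value of cpuhw->disabled,
 * and hw_perf_restore() does nothing when passed a non-zero value, so
 * save_disable/restore pairs may be nested: only the outermost restore
 * actually re-enables the PMU.
 */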
/*
 * Re-enable all counters if disable == 0.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_restore(u64 disable)
{
        struct perf_counter *counter;
        struct cpu_hw_counters *cpuhw;
        unsigned long flags;
        long i;
        unsigned long val;
        s64 left;
        unsigned int hwc_index[MAX_HWCOUNTERS];

        if (disable)
                return;
        local_irq_save(flags);
        cpuhw = &__get_cpu_var(cpu_hw_counters);
        cpuhw->disabled = 0;

        /*
         * If we didn't change anything, or only removed counters,
         * no need to recalculate MMCR* settings and reset the PMCs.
         * Just reenable the PMU with the current MMCR* settings
         * (possibly updated for removal of counters).
         */
        if (!cpuhw->n_added) {
                mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
                mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
                mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
                if (cpuhw->n_counters == 0)
                        get_lppaca()->pmcregs_in_use = 0;
                goto out;
        }

        /*
         * Compute MMCR* values for the new set of counters
         */
        if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
                               cpuhw->mmcr)) {
                /* shouldn't ever get here */
                printk(KERN_ERR "oops compute_mmcr failed\n");
                goto out;
        }

        /*
         * Write the new configuration to MMCR* with the freeze
         * bit set and set the hardware counters to their initial values.
         * Then unfreeze the counters.
         */
        get_lppaca()->pmcregs_in_use = 1;
        mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
        mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
        mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
              | MMCR0_FC);

        /*
         * Read off any pre-existing counters that need to move
         * to another PMC.
         */
        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
                        power_perf_read(counter);
                        write_pmc(counter->hw.idx, 0);
                        counter->hw.idx = 0;
                }
        }

        /*
         * Initialize the PMCs for all the new and moved counters.
         */
        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                if (counter->hw.idx)
                        continue;
                val = 0;
                if (counter->hw_event.irq_period) {
                        left = atomic64_read(&counter->hw.period_left);
                        if (left < 0x80000000L)
                                val = 0x80000000L - left;
                }
                atomic64_set(&counter->hw.prev_count, val);
                counter->hw.idx = hwc_index[i] + 1;
                write_pmc(counter->hw.idx, val);
        }
        mb();
        cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
        mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);

 out:
        local_irq_restore(flags);
}

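/*
 * Collect the group leader (if it is a hardware counter) and all of its
 * non-software, non-OFF siblings into ctrs[], and their event codes into
 * events[].  Returns the number of counters collected, or -1 if that
 * would exceed max_count.
 */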
static int collect_events(struct perf_counter *group, int max_count,
                          struct perf_counter *ctrs[], unsigned int *events)
{
        int n = 0;
        struct perf_counter *counter;

        if (!is_software_counter(group)) {
                if (n >= max_count)
                        return -1;
                ctrs[n] = group;
                events[n++] = group->hw.config;
        }
        list_for_each_entry(counter, &group->sibling_list, list_entry) {
                if (!is_software_counter(counter) &&
                    counter->state != PERF_COUNTER_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
                        ctrs[n] = counter;
                        events[n++] = counter->hw.config;
                }
        }
        return n;
}

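/*
 * Mark a counter as active on this CPU.  Software counters have to be
 * enabled explicitly here; hardware counters are started when the PMU
 * is reprogrammed and unfrozen in hw_perf_restore().
 */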
static void counter_sched_in(struct perf_counter *counter, int cpu)
{
        counter->state = PERF_COUNTER_STATE_ACTIVE;
        counter->oncpu = cpu;
        if (is_software_counter(counter))
                counter->hw_ops->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
                           struct perf_cpu_context *cpuctx,
                           struct perf_counter_context *ctx, int cpu)
{
        struct cpu_hw_counters *cpuhw;
        long i, n, n0;
        struct perf_counter *sub;

        cpuhw = &__get_cpu_var(cpu_hw_counters);
        n0 = cpuhw->n_counters;
        n = collect_events(group_leader, ppmu->n_counter - n0,
                           &cpuhw->counter[n0], &cpuhw->events[n0]);
        if (n < 0)
                return -EAGAIN;
        if (power_check_constraints(cpuhw->events, n + n0))
                return -EAGAIN;
        cpuhw->n_counters = n0 + n;
        cpuhw->n_added += n;

        /*
         * OK, this group can go on; update counter states etc.,
         * and enable any software counters
         */
        for (i = n0; i < n0 + n; ++i)
                cpuhw->counter[i]->hw.config = cpuhw->events[i];
        cpuctx->active_oncpu += n;
        n = 1;
        counter_sched_in(group_leader, cpu);
        list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
                if (sub->state != PERF_COUNTER_STATE_OFF) {
                        counter_sched_in(sub, cpu);
                        ++n;
                }
        }
        ctx->nr_active += n;

        return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_restore to do the
 * actual work of reconfiguring the PMU.
 */
static int power_perf_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuhw;
        unsigned long flags;
        u64 pmudis;
        int n0;
        int ret = -EAGAIN;

        local_irq_save(flags);
        pmudis = hw_perf_save_disable();

        /*
         * Add the counter to the list (if there is room)
         * and check whether the total set is still feasible.
         */
        cpuhw = &__get_cpu_var(cpu_hw_counters);
        n0 = cpuhw->n_counters;
        if (n0 >= ppmu->n_counter)
                goto out;
        cpuhw->counter[n0] = counter;
        cpuhw->events[n0] = counter->hw.config;
        if (power_check_constraints(cpuhw->events, n0 + 1))
                goto out;

        counter->hw.config = cpuhw->events[n0];
        ++cpuhw->n_counters;
        ++cpuhw->n_added;

        ret = 0;
 out:
        hw_perf_restore(pmudis);
        local_irq_restore(flags);
        return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_perf_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuhw;
        long i;
        u64 pmudis;
        unsigned long flags;

        local_irq_save(flags);
        pmudis = hw_perf_save_disable();

        power_perf_read(counter);

        cpuhw = &__get_cpu_var(cpu_hw_counters);
        for (i = 0; i < cpuhw->n_counters; ++i) {
                if (counter == cpuhw->counter[i]) {
                        while (++i < cpuhw->n_counters)
                                cpuhw->counter[i-1] = cpuhw->counter[i];
                        --cpuhw->n_counters;
                        ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
                        write_pmc(counter->hw.idx, 0);
                        counter->hw.idx = 0;
                        break;
                }
        }
        if (cpuhw->n_counters == 0) {
                /* disable exceptions if no counters are running */
                cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
        }

        hw_perf_restore(pmudis);
        local_irq_restore(flags);
}

struct hw_perf_counter_ops power_perf_ops = {
        .enable = power_perf_enable,
        .disable = power_perf_disable,
        .read = power_perf_read
};

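/*
 * Check whether we can support a new hardware counter: map a generic
 * event type to a raw event code, verify that the counter can be
 * scheduled together with the existing counters in its group, and
 * return &power_perf_ops on success or NULL on failure.
 */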
const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
        unsigned long ev;
        struct perf_counter *ctrs[MAX_HWCOUNTERS];
        unsigned int events[MAX_HWCOUNTERS];
        int n;

        if (!ppmu)
                return NULL;
        if ((s64)counter->hw_event.irq_period < 0)
                return NULL;
        ev = counter->hw_event.type;
        if (!counter->hw_event.raw) {
                if (ev >= ppmu->n_generic ||
                    ppmu->generic_events[ev] == 0)
                        return NULL;
                ev = ppmu->generic_events[ev];
        }
        counter->hw.config_base = ev;
        counter->hw.idx = 0;

        /*
         * If this is in a group, check if it can go on with all the
         * other hardware counters in the group.  We assume the counter
         * hasn't been linked into its leader's sibling list at this point.
         */
        n = 0;
        if (counter->group_leader != counter) {
                n = collect_events(counter->group_leader, ppmu->n_counter - 1,
                                   ctrs, events);
                if (n < 0)
                        return NULL;
        }
        events[n++] = ev;
        if (power_check_constraints(events, n))
                return NULL;

        counter->hw.config = events[n - 1];
        atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
        return &power_perf_ops;
}

/*
 * Handle wakeups.
 */
void perf_counter_do_pending(void)
{
        int i;
        struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
        struct perf_counter *counter;

        set_perf_counter_pending(0);
        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                if (counter && counter->wakeup_pending) {
                        counter->wakeup_pending = 0;
                        wake_up(&counter->waitq);
                }
        }
}

/*
 * Record data for an irq counter.
 * This function was lifted from the x86 code; maybe it should
 * go in the core?
 */
static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
        struct perf_data *irqdata = counter->irqdata;

        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
                irqdata->overrun++;
        } else {
                u64 *p = (u64 *) &irqdata->data[irqdata->len];

                *p = data;
                irqdata->len += sizeof(u64);
        }
}

/*
 * Record all the values of the counters in a group
 */
static void perf_handle_group(struct perf_counter *counter)
{
        struct perf_counter *leader, *sub;

        leader = counter->group_leader;
        list_for_each_entry(sub, &leader->sibling_list, list_entry) {
                if (sub != counter)
                        sub->hw_ops->read(sub);
                perf_store_irq_data(counter, sub->hw_event.type);
                perf_store_irq_data(counter, atomic64_read(&sub->count));
        }
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
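/*
 * The PMCs count upwards and raise a performance monitor interrupt when
 * a counter goes negative (bit 31 set), so reloading a PMC with
 * 0x80000000 - left makes the next interrupt arrive after `left' more
 * events.  If the remaining period is 2^31 or more, the PMC is simply
 * set to 0 and the residue stays in period_left.
 */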
static void record_and_restart(struct perf_counter *counter, long val,
                               struct pt_regs *regs)
{
        s64 prev, delta, left;
        int record = 0;

        /* we don't have to worry about interrupts here */
        prev = atomic64_read(&counter->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
        atomic64_add(delta, &counter->count);

        /*
         * See if the total period for this counter has expired,
         * and update for the next period.
         */
        val = 0;
        left = atomic64_read(&counter->hw.period_left) - delta;
        if (counter->hw_event.irq_period) {
                if (left <= 0) {
                        left += counter->hw_event.irq_period;
                        if (left <= 0)
                                left = counter->hw_event.irq_period;
                        record = 1;
                }
                if (left < 0x80000000L)
                        val = 0x80000000L - left;
        }
        write_pmc(counter->hw.idx, val);
        atomic64_set(&counter->hw.prev_count, val);
        atomic64_set(&counter->hw.period_left, left);

        /*
         * Finally record data if requested.
         */
        if (record) {
                switch (counter->hw_event.record_type) {
                case PERF_RECORD_SIMPLE:
                        break;
                case PERF_RECORD_IRQ:
                        perf_store_irq_data(counter, instruction_pointer(regs));
                        counter->wakeup_pending = 1;
                        break;
                case PERF_RECORD_GROUP:
                        perf_handle_group(counter);
                        counter->wakeup_pending = 1;
                        break;
                }
        }
}

/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
        int i;
        struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
        struct perf_counter *counter;
        long val;
        int need_wakeup = 0, found = 0;

        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                val = read_pmc(counter->hw.idx);
                if ((int)val < 0) {
                        /* counter has overflowed */
                        found = 1;
                        record_and_restart(counter, val, regs);
                        if (counter->wakeup_pending)
                                need_wakeup = 1;
                }
        }

        /*
         * In case we didn't find and reset the counter that caused
         * the interrupt, scan all counters and reset any that are
         * negative, to avoid getting continual interrupts.
         * Any that we processed in the previous loop will not be negative.
         */
        if (!found) {
                for (i = 0; i < ppmu->n_counter; ++i) {
                        val = read_pmc(i + 1);
                        if ((int)val < 0)
                                write_pmc(i + 1, 0);
                }
        }

        /*
         * Reset MMCR0 to its normal value.  This will set PMXE and
         * clear FC (freeze counters) and PMAO (perf mon alert occurred)
         * and thus allow interrupts to occur again.
         * XXX might want to use MSR.PM to keep the counters frozen until
         * we get back out of this interrupt.
         */
        mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);

        /*
         * If we need a wakeup, check whether interrupts were soft-enabled
         * when we took the interrupt.  If they were, we can wake stuff up
         * immediately; otherwise we'll have to set a flag and do the
         * wakeup when interrupts get soft-enabled.
         */
        if (need_wakeup) {
                if (regs->softe) {
                        irq_enter();
                        perf_counter_do_pending();
                        irq_exit();
                } else {
                        set_perf_counter_pending(1);
                }
        }
}

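/*
 * Per-cpu setup: clear the per-cpu state for a CPU and leave its
 * counters frozen (MMCR0_FC) until something is scheduled onto the PMU.
 */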
void hw_perf_counter_setup(int cpu)
{
        struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

        memset(cpuhw, 0, sizeof(*cpuhw));
        cpuhw->mmcr[0] = MMCR0_FC;
}

extern struct power_pmu ppc970_pmu;
extern struct power_pmu power6_pmu;

static int init_perf_counters(void)
{
        unsigned long pvr;

        if (reserve_pmc_hardware(perf_counter_interrupt)) {
                printk(KERN_ERR "Couldn't init performance monitor subsystem\n");
                return -EBUSY;
        }

        /* XXX should get this from cputable */
        pvr = mfspr(SPRN_PVR);
        switch (PVR_VER(pvr)) {
        case PV_970:
        case PV_970FX:
        case PV_970MP:
                ppmu = &ppc970_pmu;
                break;
        case 0x3e:
                ppmu = &power6_pmu;
                break;
        }
        return 0;
}

arch_initcall(init_perf_counters);