MIPS: perf: Cleanup formatting in arch/mips/kernel/perf_event.c
arch/mips/kernel/perf_event_mipsxx.c
1#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \
2 defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1)
3
4#define M_CONFIG1_PC (1 << 4)
5
6#define M_PERFCTL_EXL (1UL << 0)
7#define M_PERFCTL_KERNEL (1UL << 1)
8#define M_PERFCTL_SUPERVISOR (1UL << 2)
9#define M_PERFCTL_USER (1UL << 3)
10#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
11#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
12#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
13#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
14#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
15#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
16#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
17#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
18#define M_PERFCTL_WIDE (1UL << 30)
19#define M_PERFCTL_MORE (1UL << 31)
20
21#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
22 M_PERFCTL_KERNEL | \
23 M_PERFCTL_USER | \
24 M_PERFCTL_SUPERVISOR | \
25 M_PERFCTL_INTERRUPT_ENABLE)
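/*
 * Worked example with the definitions above: a control word that counts
 * event 5 in user mode with the overflow interrupt enabled is
 * M_PERFCTL_EVENT(5) | M_PERFCTL_USER | M_PERFCTL_INTERRUPT_ENABLE,
 * i.e. (5 << 5) | (1 << 3) | (1 << 4) == 0xb8.
 */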
26
27#ifdef CONFIG_MIPS_MT_SMP
28#define M_PERFCTL_CONFIG_MASK 0x3fff801f
29#else
30#define M_PERFCTL_CONFIG_MASK 0x1f
31#endif
32#define M_PERFCTL_EVENT_MASK 0xfe0
33
34#define M_COUNTER_OVERFLOW (1UL << 31)
35
36#ifdef CONFIG_MIPS_MT_SMP
37static int cpu_has_mipsmt_pertccounters;
38
39/*
40 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
41 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
42 */
43#if defined(CONFIG_HW_PERF_EVENTS)
44#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
45 0 : smp_processor_id())
46#else
47#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
48 0 : cpu_data[smp_processor_id()].vpe_id)
49#endif
50
51/* Copied from op_model_mipsxx.c */
52static unsigned int vpe_shift(void)
53{
54 if (num_possible_cpus() > 1)
55 return 1;
56
57 return 0;
58}
59
60static unsigned int counters_total_to_per_cpu(unsigned int counters)
61{
62 return counters >> vpe_shift();
63}
64
65static unsigned int counters_per_cpu_to_total(unsigned int counters)
66{
67 return counters << vpe_shift();
68}
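/*
 * Example: on a two-VPE VSMP system num_possible_cpus() > 1, so
 * vpe_shift() returns 1; counters_total_to_per_cpu(4) then yields 2
 * counters per VPE, and counters_per_cpu_to_total(2) maps back to 4.
 */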
69
70#else /* !CONFIG_MIPS_MT_SMP */
71#define vpe_id() 0
72
73#endif /* CONFIG_MIPS_MT_SMP */
74
75#define __define_perf_accessors(r, n, np) \
76 \
77static unsigned int r_c0_ ## r ## n(void) \
78{ \
79 unsigned int cpu = vpe_id(); \
80 \
81 switch (cpu) { \
82 case 0: \
83 return read_c0_ ## r ## n(); \
84 case 1: \
85 return read_c0_ ## r ## np(); \
86 default: \
87 BUG(); \
88 } \
89 return 0; \
90} \
91 \
92static void w_c0_ ## r ## n(unsigned int value) \
93{ \
94 unsigned int cpu = vpe_id(); \
95 \
96 switch (cpu) { \
97 case 0: \
98 write_c0_ ## r ## n(value); \
99 return; \
100 case 1: \
101 write_c0_ ## r ## np(value); \
102 return; \
103 default: \
104 BUG(); \
105 } \
106 return; \
107} \
108
109__define_perf_accessors(perfcntr, 0, 2)
110__define_perf_accessors(perfcntr, 1, 3)
111__define_perf_accessors(perfcntr, 2, 0)
112__define_perf_accessors(perfcntr, 3, 1)
113
114__define_perf_accessors(perfctrl, 0, 2)
115__define_perf_accessors(perfctrl, 1, 3)
116__define_perf_accessors(perfctrl, 2, 0)
117__define_perf_accessors(perfctrl, 3, 1)
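/*
 * Example: __define_perf_accessors(perfcntr, 0, 2) expands to
 * r_c0_perfcntr0()/w_c0_perfcntr0(), which access perfcntr0 when
 * vpe_id() is 0 and perfcntr2 when vpe_id() is 1, so each VPE gets
 * its own bank of counter/control registers.
 */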
118
119static int __n_counters(void)
120{
121 if (!(read_c0_config1() & M_CONFIG1_PC))
122 return 0;
123 if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
124 return 1;
125 if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
126 return 2;
127 if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
128 return 3;
129
130 return 4;
131}
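/*
 * Example: __n_counters() first checks Config1.PC to see whether any
 * performance counters exist at all, then follows the M_PERFCTL_MORE
 * chain; if perfctrl0 has MORE set but perfctrl1 does not, exactly two
 * counters are present.
 */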
132
133static int n_counters(void)
134{
135 int counters;
136
137 switch (current_cpu_type()) {
138 case CPU_R10000:
139 counters = 2;
140 break;
141
142 case CPU_R12000:
143 case CPU_R14000:
144 counters = 4;
145 break;
146
147 default:
148 counters = __n_counters();
149 }
150
151 return counters;
152}
153
154static void reset_counters(void *arg)
155{
156 int counters = (int)(long)arg;
157 switch (counters) {
158 case 4:
159 w_c0_perfctrl3(0);
160 w_c0_perfcntr3(0);
161 case 3:
162 w_c0_perfctrl2(0);
163 w_c0_perfcntr2(0);
164 case 2:
165 w_c0_perfctrl1(0);
166 w_c0_perfcntr1(0);
167 case 1:
168 w_c0_perfctrl0(0);
169 w_c0_perfcntr0(0);
170 }
171}
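/*
 * Note: the switch above intentionally falls through, so a request to
 * reset N counters clears control and count registers N-1 down to 0.
 */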
172
173static u64 mipsxx_pmu_read_counter(unsigned int idx)
174{
175 switch (idx) {
176 case 0:
177 return r_c0_perfcntr0();
178 case 1:
179 return r_c0_perfcntr1();
180 case 2:
181 return r_c0_perfcntr2();
182 case 3:
183 return r_c0_perfcntr3();
184 default:
185 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
186 return 0;
187 }
188}
189
190static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
191{
192 switch (idx) {
193 case 0:
194 w_c0_perfcntr0(val);
195 return;
196 case 1:
197 w_c0_perfcntr1(val);
198 return;
199 case 2:
200 w_c0_perfcntr2(val);
201 return;
202 case 3:
203 w_c0_perfcntr3(val);
204 return;
205 }
206}
207
208static unsigned int mipsxx_pmu_read_control(unsigned int idx)
209{
210 switch (idx) {
211 case 0:
212 return r_c0_perfctrl0();
213 case 1:
214 return r_c0_perfctrl1();
215 case 2:
216 return r_c0_perfctrl2();
217 case 3:
218 return r_c0_perfctrl3();
219 default:
220 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
221 return 0;
222 }
223}
224
225static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
226{
227 switch (idx) {
228 case 0:
229 w_c0_perfctrl0(val);
230 return;
231 case 1:
232 w_c0_perfctrl1(val);
233 return;
234 case 2:
235 w_c0_perfctrl2(val);
236 return;
237 case 3:
238 w_c0_perfctrl3(val);
239 return;
240 }
241}
242
243#ifdef CONFIG_MIPS_MT_SMP
244static DEFINE_RWLOCK(pmuint_rwlock);
245#endif
246
247/* 24K/34K/1004K cores can share the same event map. */
248static const struct mips_perf_event mipsxxcore_event_map
249 [PERF_COUNT_HW_MAX] = {
250 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
251 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
252 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
253 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
254 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
255 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
256 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
257};
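/*
 * Each entry above pairs a hardware event number with the counters
 * that may count it (CNTR_EVEN and/or CNTR_ODD) and an MT range hint
 * (P, V or T), as consumed by the event-mapping code below.
 */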
258
259/* 74K core has different branch event code. */
260static const struct mips_perf_event mipsxx74Kcore_event_map
261 [PERF_COUNT_HW_MAX] = {
262 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
263 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
264 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
265 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
266 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
267 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
268 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
269};
270
271/* 24K/34K/1004K cores can share the same cache event map. */
272static const struct mips_perf_event mipsxxcore_cache_map
273 [PERF_COUNT_HW_CACHE_MAX]
274 [PERF_COUNT_HW_CACHE_OP_MAX]
275 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
276[C(L1D)] = {
277 /*
278 * Like some other architectures (e.g. ARM), the performance
279 * counters don't differentiate between read and write
280 * accesses/misses, so this isn't strictly correct, but it's the
281 * best we can do. Writes and reads get combined.
282 */
283 [C(OP_READ)] = {
284 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
285 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
286 },
287 [C(OP_WRITE)] = {
288 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
289 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
290 },
291 [C(OP_PREFETCH)] = {
292 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
293 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
294 },
295},
296[C(L1I)] = {
297 [C(OP_READ)] = {
298 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
299 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
300 },
301 [C(OP_WRITE)] = {
302 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
303 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
304 },
305 [C(OP_PREFETCH)] = {
306 [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
307 /*
308 * Note that MIPS has only "hit" events countable for
309 * the prefetch operation.
310 */
311 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
312 },
313},
314[C(LL)] = {
315 [C(OP_READ)] = {
316 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
317 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
318 },
319 [C(OP_WRITE)] = {
320 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
321 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
322 },
323 [C(OP_PREFETCH)] = {
324 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
325 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
326 },
327},
328[C(DTLB)] = {
329 [C(OP_READ)] = {
330 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
331 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
332 },
333 [C(OP_WRITE)] = {
334 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
335 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
336 },
337 [C(OP_PREFETCH)] = {
338 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
339 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
340 },
341},
342[C(ITLB)] = {
343 [C(OP_READ)] = {
344 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
345 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
346 },
347 [C(OP_WRITE)] = {
348 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
349 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
350 },
351 [C(OP_PREFETCH)] = {
352 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
353 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
354 },
355},
356[C(BPU)] = {
357 /* Using the same code for *HW_BRANCH* */
358 [C(OP_READ)] = {
359 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
360 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
361 },
362 [C(OP_WRITE)] = {
363 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
364 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
365 },
366 [C(OP_PREFETCH)] = {
367 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
368 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
369 },
370},
371[C(NODE)] = {
372 [C(OP_READ)] = {
373 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
374 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
375 },
376 [C(OP_WRITE)] = {
377 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
378 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
379 },
380 [C(OP_PREFETCH)] = {
381 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
382 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
383 },
384},
385};
386
387/* 74K core has completely different cache event map. */
388static const struct mips_perf_event mipsxx74Kcore_cache_map
389 [PERF_COUNT_HW_CACHE_MAX]
390 [PERF_COUNT_HW_CACHE_OP_MAX]
391 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
392[C(L1D)] = {
393 /*
394 * Like some other architectures (e.g. ARM), the performance
395 * counters don't differentiate between read and write
396 * accesses/misses, so this isn't strictly correct, but it's the
397 * best we can do. Writes and reads get combined.
398 */
399 [C(OP_READ)] = {
400 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
401 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
402 },
403 [C(OP_WRITE)] = {
404 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
405 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
406 },
407 [C(OP_PREFETCH)] = {
408 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
409 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
410 },
411},
412[C(L1I)] = {
413 [C(OP_READ)] = {
414 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
415 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
416 },
417 [C(OP_WRITE)] = {
418 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
419 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
420 },
421 [C(OP_PREFETCH)] = {
422 [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
423 /*
424 * Note that MIPS has only "hit" events countable for
425 * the prefetch operation.
426 */
427 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
428 },
429},
430[C(LL)] = {
431 [C(OP_READ)] = {
432 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
433 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
434 },
435 [C(OP_WRITE)] = {
436 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
437 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
438 },
439 [C(OP_PREFETCH)] = {
440 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
441 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
442 },
443},
444[C(DTLB)] = {
445 /* 74K core does not have specific DTLB events. */
446 [C(OP_READ)] = {
447 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
448 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
449 },
450 [C(OP_WRITE)] = {
451 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
452 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
453 },
454 [C(OP_PREFETCH)] = {
455 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
456 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
457 },
458},
459[C(ITLB)] = {
460 [C(OP_READ)] = {
461 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
462 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
463 },
464 [C(OP_WRITE)] = {
465 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
466 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
467 },
468 [C(OP_PREFETCH)] = {
469 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
470 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
471 },
472},
473[C(BPU)] = {
474 /* Using the same code for *HW_BRANCH* */
475 [C(OP_READ)] = {
476 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
477 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
478 },
479 [C(OP_WRITE)] = {
480 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
481 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
482 },
483 [C(OP_PREFETCH)] = {
484 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
485 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
486 },
487},
488[C(NODE)] = {
489 [C(OP_READ)] = {
490 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
491 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
492 },
493 [C(OP_WRITE)] = {
494 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
495 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
496 },
497 [C(OP_PREFETCH)] = {
498 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
499 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
500 },
501},
502};
503
504#ifdef CONFIG_MIPS_MT_SMP
505static void check_and_calc_range(struct perf_event *event,
506 const struct mips_perf_event *pev)
507{
508 struct hw_perf_event *hwc = &event->hw;
509
510 if (event->cpu >= 0) {
511 if (pev->range > V) {
512 /*
513 * The user selected an event that is processor
514 * wide, while expecting it to be VPE wide.
515 */
516 hwc->config_base |= M_TC_EN_ALL;
517 } else {
518 /*
519 * FIXME: cpu_data[event->cpu].vpe_id reports 0
520 * for both CPUs.
521 */
522 hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
523 hwc->config_base |= M_TC_EN_VPE;
524 }
525 } else
526 hwc->config_base |= M_TC_EN_ALL;
527}
528#else
529static void check_and_calc_range(struct perf_event *event,
530 const struct mips_perf_event *pev)
531{
532}
533#endif
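/*
 * Example: on an MT core, a CPU-bound event whose range is VPE or
 * narrower gets M_PERFCTL_VPEID(cpu) | M_TC_EN_VPE in its control
 * word, while processor-wide or unbound events count across all TCs
 * via M_TC_EN_ALL. On non-MT configurations this is a no-op.
 */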
534
535static int __hw_perf_event_init(struct perf_event *event)
536{
537 struct perf_event_attr *attr = &event->attr;
538 struct hw_perf_event *hwc = &event->hw;
539 const struct mips_perf_event *pev;
540 int err;
541
542 /* Returning MIPS event descriptor for generic perf event. */
543 if (PERF_TYPE_HARDWARE == event->attr.type) {
544 if (event->attr.config >= PERF_COUNT_HW_MAX)
545 return -EINVAL;
546 pev = mipspmu_map_general_event(event->attr.config);
547 } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
548 pev = mipspmu_map_cache_event(event->attr.config);
549 } else if (PERF_TYPE_RAW == event->attr.type) {
550 /* We are working on the global raw event. */
551 mutex_lock(&raw_event_mutex);
552 pev = mipspmu->map_raw_event(event->attr.config);
553 } else {
554 /* The event type is not (yet) supported. */
555 return -EOPNOTSUPP;
556 }
557
558 if (IS_ERR(pev)) {
559 if (PERF_TYPE_RAW == event->attr.type)
560 mutex_unlock(&raw_event_mutex);
561 return PTR_ERR(pev);
562 }
563
564 /*
565 * We allow maximum flexibility in how each individual counter shared
566 * by a single CPU operates (mode exclusion and range).
567 */
568 hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
569
570 /* Calculate range bits and validate it. */
571 if (num_possible_cpus() > 1)
572 check_and_calc_range(event, pev);
573
574 hwc->event_base = mipspmu_perf_event_encode(pev);
575 if (PERF_TYPE_RAW == event->attr.type)
576 mutex_unlock(&raw_event_mutex);
577
578 if (!attr->exclude_user)
579 hwc->config_base |= M_PERFCTL_USER;
580 if (!attr->exclude_kernel) {
581 hwc->config_base |= M_PERFCTL_KERNEL;
582 /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
583 hwc->config_base |= M_PERFCTL_EXL;
584 }
585 if (!attr->exclude_hv)
586 hwc->config_base |= M_PERFCTL_SUPERVISOR;
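	/*
	 * Example: with no exclusions requested, the low five bits
	 * accumulated so far are EXL | KERNEL | SUPERVISOR | USER |
	 * INTERRUPT_ENABLE, i.e. 0x1f, which is exactly
	 * M_PERFCTL_COUNT_EVENT_WHENEVER.
	 */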
587
588 hwc->config_base &= M_PERFCTL_CONFIG_MASK;
589 /*
590 * The event can belong to another CPU. We do not assign a local
591 * counter to it for now.
592 */
593 hwc->idx = -1;
594 hwc->config = 0;
595
596 if (!hwc->sample_period) {
597 hwc->sample_period = MAX_PERIOD;
598 hwc->last_period = hwc->sample_period;
599 local64_set(&hwc->period_left, hwc->sample_period);
600 }
601
602 err = 0;
603 if (event->group_leader != event) {
604 err = validate_group(event);
605 if (err)
606 return -EINVAL;
607 }
608
609 event->destroy = hw_perf_event_destroy;
610
611 return err;
612}
613
614static void pause_local_counters(void)
615{
616 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
617 int counters = mipspmu->num_counters;
618 unsigned long flags;
619
620 local_irq_save(flags);
621 switch (counters) {
622 case 4:
623 cpuc->saved_ctrl[3] = r_c0_perfctrl3();
624 w_c0_perfctrl3(cpuc->saved_ctrl[3] &
625 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
626 case 3:
627 cpuc->saved_ctrl[2] = r_c0_perfctrl2();
628 w_c0_perfctrl2(cpuc->saved_ctrl[2] &
629 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
630 case 2:
631 cpuc->saved_ctrl[1] = r_c0_perfctrl1();
632 w_c0_perfctrl1(cpuc->saved_ctrl[1] &
633 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
634 case 1:
635 cpuc->saved_ctrl[0] = r_c0_perfctrl0();
636 w_c0_perfctrl0(cpuc->saved_ctrl[0] &
637 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
638 }
639 local_irq_restore(flags);
640}
641
642static void resume_local_counters(void)
643{
644 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
645 int counters = mipspmu->num_counters;
646 unsigned long flags;
647
648 local_irq_save(flags);
649 switch (counters) {
650 case 4:
651 w_c0_perfctrl3(cpuc->saved_ctrl[3]);
652 case 3:
653 w_c0_perfctrl2(cpuc->saved_ctrl[2]);
654 case 2:
655 w_c0_perfctrl1(cpuc->saved_ctrl[1]);
656 case 1:
657 w_c0_perfctrl0(cpuc->saved_ctrl[0]);
658 }
659 local_irq_restore(flags);
660}
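/*
 * pause_local_counters() saves each control register and clears its
 * counting-mode bits (M_PERFCTL_COUNT_EVENT_WHENEVER), so the counters
 * stop without losing their configuration; resume_local_counters()
 * simply writes the saved control words back.
 */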
661
662static int mipsxx_pmu_handle_shared_irq(void)
663{
664 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
665 struct perf_sample_data data;
666 unsigned int counters = mipspmu->num_counters;
667 unsigned int counter;
668 int handled = IRQ_NONE;
669 struct pt_regs *regs;
670
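	/*
	 * On MIPS R2 cores, Cause bit 26 (PCI) flags a pending performance
	 * counter interrupt; if it is clear, this interrupt is not ours.
	 */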
671 if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
672 return handled;
673
674 /*
675 * First we pause the local counters, so that when we get locked
676 * here, the counters are already paused. When the lock is taken due
677 * to perf_disable(), the timer interrupt handler will be delayed.
678 *
679 * See also mipsxx_pmu_start().
680 */
681 pause_local_counters();
682#ifdef CONFIG_MIPS_MT_SMP
683 read_lock(&pmuint_rwlock);
684#endif
685
686 regs = get_irq_regs();
687
688 perf_sample_data_init(&data, 0);
689
690 switch (counters) {
691#define HANDLE_COUNTER(n) \
692 case n + 1: \
693 if (test_bit(n, cpuc->used_mask)) { \
694 counter = r_c0_perfcntr ## n(); \
695 if (counter & M_COUNTER_OVERFLOW) { \
696 w_c0_perfcntr ## n(counter & \
697 VALID_COUNT); \
698 if (test_and_change_bit(n, cpuc->msbs)) \
699 handle_associated_event(cpuc, \
700 n, &data, regs); \
701 handled = IRQ_HANDLED; \
702 } \
703 }
704 HANDLE_COUNTER(3)
705 HANDLE_COUNTER(2)
706 HANDLE_COUNTER(1)
707 HANDLE_COUNTER(0)
708 }
709
710 /*
711 * Do all the work for the pending perf events. We can do this
712 * in here because the performance counter interrupt is a regular
713 * interrupt, not NMI.
714 */
715 if (handled == IRQ_HANDLED)
716 irq_work_run();
717
718#ifdef CONFIG_MIPS_MT_SMP
719 read_unlock(&pmuint_rwlock);
720#endif
721 resume_local_counters();
722 return handled;
723}
724
725static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
726{
727 return mipsxx_pmu_handle_shared_irq();
728}
729
730static void mipsxx_pmu_start(void)
731{
732#ifdef CONFIG_MIPS_MT_SMP
733 write_unlock(&pmuint_rwlock);
734#endif
735 resume_local_counters();
736}
737
738/*
739 * MIPS performance counters can be per-TC. The control registers can
740 * not be directly accessed across CPUs. Hence if we want to do global
741 * control, we need cross CPU calls. on_each_cpu() can help us, but we
742 * can not make sure this function is called with interrupts enabled. So
743 * here we pause local counters and then grab a rwlock and leave the
744 * counters on other CPUs alone. If any counter interrupt is raised while
745 * we own the write lock, simply pause local counters on that CPU and
746 * spin in the handler. Also we know we won't be switched to another
747 * CPU after pausing local counters and before grabbing the lock.
748 */
749static void mipsxx_pmu_stop(void)
750{
751 pause_local_counters();
752#ifdef CONFIG_MIPS_MT_SMP
753 write_lock(&pmuint_rwlock);
754#endif
755}
756
757static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
758 struct hw_perf_event *hwc)
759{
760 int i;
761
762 /*
763 * We only need to care about the counter mask. The range has
764 * already been checked.
765 */
766 unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
767
768 for (i = mipspmu->num_counters - 1; i >= 0; i--) {
769 /*
770 * Note that some MIPS perf events can be counted by both
771 * even and odd counters, whereas many others can only be
772 * counted by even _or_ odd counters. This introduces an issue:
773 * when the former kind of event takes the counter the
774 * latter kind of event wants to use, the "counter
775 * allocation" for the latter event will fail. If the two
776 * could be swapped dynamically, both events could be
777 * satisfied, but we leave this issue alone for now.
778 */
779 if (test_bit(i, &cntr_mask) &&
780 !test_and_set_bit(i, cpuc->used_mask))
781 return i;
782 }
783
784 return -EAGAIN;
785}
786
787static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
788{
789 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
790 unsigned long flags;
791
792 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
793
794 local_irq_save(flags);
795 cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
796 (evt->config_base & M_PERFCTL_CONFIG_MASK) |
797 /* Make sure interrupt enabled. */
798 M_PERFCTL_INTERRUPT_ENABLE;
799 /*
800 * We do not actually let the counter run. Leave it until start().
801 */
802 local_irq_restore(flags);
803}
804
805static void mipsxx_pmu_disable_event(int idx)
806{
807 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
808 unsigned long flags;
809
810 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
811
812 local_irq_save(flags);
813 cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
814 ~M_PERFCTL_COUNT_EVENT_WHENEVER;
815 mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
816 local_irq_restore(flags);
817}
818
819/* 24K */
820#define IS_UNSUPPORTED_24K_EVENT(r, b) \
821 ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \
822 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \
823 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \
824 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \
825 ((b) >= 68 && (b) <= 127))
826#define IS_BOTH_COUNTERS_24K_EVENT(b) \
827 ((b) == 0 || (b) == 1 || (b) == 11)
828
829/* 34K */
830#define IS_UNSUPPORTED_34K_EVENT(r, b) \
831 ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \
832 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \
833 ((b) >= 68 && (b) <= 127))
834#define IS_BOTH_COUNTERS_34K_EVENT(b) \
835 ((b) == 0 || (b) == 1 || (b) == 11)
836#ifdef CONFIG_MIPS_MT_SMP
837#define IS_RANGE_P_34K_EVENT(r, b) \
838 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
839 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
840 (r) == 176 || ((b) >= 50 && (b) <= 55) || \
841 ((b) >= 64 && (b) <= 67))
842#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
843#endif
844
845/* 74K */
846#define IS_UNSUPPORTED_74K_EVENT(r, b) \
847 ((r) == 5 || ((r) >= 135 && (r) <= 137) || \
848 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \
849 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \
850 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \
851 (b) == 61 || (r) == 62 || (r) == 191 || \
852 ((b) >= 64 && (b) <= 127))
853#define IS_BOTH_COUNTERS_74K_EVENT(b) \
854 ((b) == 0 || (b) == 1)
855
856/* 1004K */
857#define IS_UNSUPPORTED_1004K_EVENT(r, b) \
858 ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \
859 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
860#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
861 ((b) == 0 || (b) == 1 || (b) == 11)
862#ifdef CONFIG_MIPS_MT_SMP
863#define IS_RANGE_P_1004K_EVENT(r, b) \
864 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
865 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
866 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
867 (r) == 188 || (b) == 61 || (b) == 62 || \
868 ((b) >= 64 && (b) <= 67))
869#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
870#endif
871
872/*
873 * Users can use raw events 0-255, where 0-127 select events for the
874 * even counters and 128-255 select events for the odd counters. Note
875 * that bit 7 indicates the counter parity. So, for example, to count
876 * Event Num 15 (taken from the user manual) on an odd counter, 128
877 * needs to be added to 15 as the input for the event config, i.e.,
878 * 143 (0x8F) is to be used.
879 */
880static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
881{
882 unsigned int raw_id = config & 0xff;
883 unsigned int base_id = raw_id & 0x7f;
884
885 switch (current_cpu_type()) {
886 case CPU_24K:
887 if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
888 return ERR_PTR(-EOPNOTSUPP);
889 raw_event.event_id = base_id;
890 if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
891 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
892 else
893 raw_event.cntr_mask =
894 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
895#ifdef CONFIG_MIPS_MT_SMP
896 /*
897 * This is actually doing nothing. Non-multithreading
898 * CPUs will not check and calculate the range.
899 */
900 raw_event.range = P;
901#endif
902 break;
903 case CPU_34K:
904 if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
905 return ERR_PTR(-EOPNOTSUPP);
906 raw_event.event_id = base_id;
907 if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
908 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
909 else
910 raw_event.cntr_mask =
911 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
912#ifdef CONFIG_MIPS_MT_SMP
913 if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
914 raw_event.range = P;
915 else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
916 raw_event.range = V;
917 else
918 raw_event.range = T;
919#endif
920 break;
921 case CPU_74K:
922 if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
923 return ERR_PTR(-EOPNOTSUPP);
924 raw_event.event_id = base_id;
925 if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
926 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
927 else
928 raw_event.cntr_mask =
929 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
930#ifdef CONFIG_MIPS_MT_SMP
931 raw_event.range = P;
932#endif
933 break;
934 case CPU_1004K:
935 if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
936 return ERR_PTR(-EOPNOTSUPP);
937 raw_event.event_id = base_id;
938 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
939 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
940 else
941 raw_event.cntr_mask =
942 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
943#ifdef CONFIG_MIPS_MT_SMP
944 if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
945 raw_event.range = P;
946 else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
947 raw_event.range = V;
948 else
949 raw_event.range = T;
950#endif
951 break;
952 }
953
954 return &raw_event;
955}
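/*
 * Example: on a 24K core a raw config of 0x8f yields raw_id = 0x8f and
 * base_id = 0x0f; the event is supported, is not a both-counters event,
 * and raw_id > 127, so it is assigned to the odd counters (CNTR_ODD).
 */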
956
957static struct mips_pmu mipsxxcore_pmu = {
958 .handle_irq = mipsxx_pmu_handle_irq,
959 .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
960 .start = mipsxx_pmu_start,
961 .stop = mipsxx_pmu_stop,
962 .alloc_counter = mipsxx_pmu_alloc_counter,
963 .read_counter = mipsxx_pmu_read_counter,
964 .write_counter = mipsxx_pmu_write_counter,
965 .enable_event = mipsxx_pmu_enable_event,
966 .disable_event = mipsxx_pmu_disable_event,
967 .map_raw_event = mipsxx_pmu_map_raw_event,
968 .general_event_map = &mipsxxcore_event_map,
969 .cache_event_map = &mipsxxcore_cache_map,
970};
971
972static struct mips_pmu mipsxx74Kcore_pmu = {
973 .handle_irq = mipsxx_pmu_handle_irq,
974 .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
975 .start = mipsxx_pmu_start,
976 .stop = mipsxx_pmu_stop,
977 .alloc_counter = mipsxx_pmu_alloc_counter,
978 .read_counter = mipsxx_pmu_read_counter,
979 .write_counter = mipsxx_pmu_write_counter,
980 .enable_event = mipsxx_pmu_enable_event,
981 .disable_event = mipsxx_pmu_disable_event,
982 .map_raw_event = mipsxx_pmu_map_raw_event,
983 .general_event_map = &mipsxx74Kcore_event_map,
984 .cache_event_map = &mipsxx74Kcore_cache_map,
985};
986
987static int __init
988init_hw_perf_events(void)
989{
990 int counters, irq;
991
992 pr_info("Performance counters: ");
993
994 counters = n_counters();
995 if (counters == 0) {
996 pr_cont("No available PMU.\n");
997 return -ENODEV;
998 }
999
1000#ifdef CONFIG_MIPS_MT_SMP
1001 cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
1002 if (!cpu_has_mipsmt_pertccounters)
1003 counters = counters_total_to_per_cpu(counters);
1004#endif
1005
1006#ifdef MSC01E_INT_BASE
1007 if (cpu_has_veic) {
1008 /*
1009 * Using platform specific interrupt controller defines.
1010 */
1011 irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
1012 } else {
1013#endif
1014 if (cp0_perfcount_irq >= 0)
1015 irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1016 else
1017 irq = -1;
1018#ifdef MSC01E_INT_BASE
1019 }
1020#endif
1021
1022 on_each_cpu(reset_counters, (void *)(long)counters, 1);
1023
1024 switch (current_cpu_type()) {
1025 case CPU_24K:
1026 mipsxxcore_pmu.name = "mips/24K";
1027 mipsxxcore_pmu.num_counters = counters;
1028 mipsxxcore_pmu.irq = irq;
1029 mipspmu = &mipsxxcore_pmu;
1030 break;
1031 case CPU_34K:
1032 mipsxxcore_pmu.name = "mips/34K";
1033 mipsxxcore_pmu.num_counters = counters;
1034 mipsxxcore_pmu.irq = irq;
1035 mipspmu = &mipsxxcore_pmu;
1036 break;
1037 case CPU_74K:
1038 mipsxx74Kcore_pmu.name = "mips/74K";
1039 mipsxx74Kcore_pmu.num_counters = counters;
1040 mipsxx74Kcore_pmu.irq = irq;
1041 mipspmu = &mipsxx74Kcore_pmu;
1042 break;
1043 case CPU_1004K:
1044 mipsxxcore_pmu.name = "mips/1004K";
1045 mipsxxcore_pmu.num_counters = counters;
1046 mipsxxcore_pmu.irq = irq;
1047 mipspmu = &mipsxxcore_pmu;
1048 break;
1049 default:
1050 pr_cont("Either hardware does not support performance "
1051 "counters, or not yet implemented.\n");
1052 return -ENODEV;
1053 }
1054
1055 if (mipspmu)
1056 pr_cont("%s PMU enabled, %d counters available to each "
1057 "CPU, irq %d%s\n", mipspmu->name, counters, irq,
1058 irq < 0 ? " (share with timer interrupt)" : "");
1059
1060 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1061
1062 return 0;
1063}
1064early_initcall(init_hw_perf_events);
1065
1066#endif /* defined(CONFIG_CPU_MIPS32)... */