/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * Based on the previous xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 *	- xscale1pmu: 2 event counters and a cycle counter
 *	- xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */
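
/*
 * Both variants expose their PMU registers through coprocessor 14
 * (p14): every register access below is an mrc/mcr rather than a
 * memory-mapped read or write.
 */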

#ifdef CONFIG_CPU_XSCALE

#include <asm/cputype.h>
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};

enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 0,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};

static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
};

static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
};

#define XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define XSCALE_CCNT_RESET	0x004
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008

#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
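
/*
 * Summary of the xscale1 PMNC layout, as implied by the definitions
 * above (descriptions paraphrased, not taken from the TRM):
 *
 *	[0]	enable all counters
 *	[1]	reset the event counters
 *	[2]	reset the cycle counter
 *	[3]	CNT64: cycle counter counts every 64th cycle
 *	[6:4]	interrupt enables for counter 0, counter 1 and CCNT
 *	[10:8]	overflow flags for counter 0, counter 1 and CCNT
 *	[19:12]	event selection for counter 0
 *	[27:20]	event selection for counter 1
 */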

static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* the upper 4 bits and bits 7 and 11 are write-as-0 */
	val &= 0xffff77f;
| 117 | asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val)); |
| 118 | } |
| 119 | |
| 120 | static inline int |
| 121 | xscale1_pmnc_counter_has_overflowed(unsigned long pmnc, |
| 122 | enum xscale_counters counter) |
| 123 | { |
| 124 | int ret = 0; |
| 125 | |
| 126 | switch (counter) { |
| 127 | case XSCALE_CYCLE_COUNTER: |
| 128 | ret = pmnc & XSCALE1_CCOUNT_OVERFLOW; |
| 129 | break; |
| 130 | case XSCALE_COUNTER0: |
| 131 | ret = pmnc & XSCALE1_COUNT0_OVERFLOW; |
| 132 | break; |
| 133 | case XSCALE_COUNTER1: |
| 134 | ret = pmnc & XSCALE1_COUNT1_OVERFLOW; |
| 135 | break; |
| 136 | default: |
| 137 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); |
| 138 | } |
| 139 | |
| 140 | return ret; |
| 141 | } |
| 142 | |
| 143 | static irqreturn_t |
| 144 | xscale1pmu_handle_irq(int irq_num, void *dev) |
| 145 | { |
| 146 | unsigned long pmnc; |
| 147 | struct perf_sample_data data; |
| 148 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev; |
| 149 | struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); |
| 150 | struct pt_regs *regs; |
| 151 | int idx; |
| 152 | |
	/*
	 * NOTE: there is an A stepping erratum whereby, if an overflow
	 * bit is already set when another overflow occurs, the earlier
	 * overflow bit is cleared. There is no workaround; this is
	 * fixed in the B stepping and later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

static void xscale1pmu_enable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale1pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

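/*
 * CCNT events can only be satisfied by the dedicated cycle counter;
 * every other event goes to a general-purpose counter, filling
 * counter 1 before counter 0.
 */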
static int
xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
			 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return XSCALE_CYCLE_COUNTER;
	} else {
		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
			return XSCALE_COUNTER1;

		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
			return XSCALE_COUNTER0;

		return -EAGAIN;
	}
}

static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u32 xscale1pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}

static int xscale_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &xscale_perf_map,
				&xscale_perf_cache_map, 0xFF);
}

static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale1";
	cpu_pmu->handle_irq	= xscale1pmu_handle_irq;
	cpu_pmu->enable		= xscale1pmu_enable_event;
	cpu_pmu->disable	= xscale1pmu_disable_event;
	cpu_pmu->read_counter	= xscale1pmu_read_counter;
	cpu_pmu->write_counter	= xscale1pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale1pmu_get_event_idx;
	cpu_pmu->start		= xscale1pmu_start;
	cpu_pmu->stop		= xscale1pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 3;
	cpu_pmu->max_period	= (1LLU << 32) - 1;

	return 0;
}

#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
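
/*
 * Unlike xscale1, which multiplexes everything through PMNC, xscale2
 * uses dedicated registers (see the definitions above and the
 * accessors below): one interrupt-enable bit and one overflow flag
 * per counter, CCNT first and then counters 0-3, plus an event
 * select register holding one byte per event counter.
 */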

static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_int_enable(u32 val)
{
	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}

static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

static void xscale2pmu_enable_event(struct perf_event *event)
{
	unsigned long flags, ien, evtsel;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale2pmu_disable_event(struct perf_event *event)
{
	unsigned long flags, ien, evtsel, of_flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		of_flags = XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		of_flags = XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		of_flags = XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		of_flags = XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		of_flags = XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	xscale2pmu_write_overflow_flags(of_flags);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

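/*
 * Reuse the xscale1 allocator for CCNT and counters 0/1, and only
 * fall back to the counters that are specific to xscale2 (counter 3,
 * then counter 2).
 */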
static int
xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
			 struct perf_event *event)
{
	int idx = xscale1pmu_get_event_idx(cpuc, event);

	if (idx >= 0)
		goto out;

	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
		idx = XSCALE_COUNTER3;
	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
		idx = XSCALE_COUNTER2;
out:
	return idx;
}

static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u32 xscale2pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}

static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale2";
	cpu_pmu->handle_irq	= xscale2pmu_handle_irq;
	cpu_pmu->enable		= xscale2pmu_enable_event;
	cpu_pmu->disable	= xscale2pmu_disable_event;
	cpu_pmu->read_counter	= xscale2pmu_read_counter;
	cpu_pmu->write_counter	= xscale2pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale2pmu_get_event_idx;
	cpu_pmu->start		= xscale2pmu_start;
	cpu_pmu->stop		= xscale2pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 5;
	cpu_pmu->max_period	= (1LLU << 32) - 1;

	return 0;
}

static const struct pmu_probe_info xscale_pmu_probe_table[] = {
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
	{ /* sentinel value */ }
};

static int xscale_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table);
}

static struct platform_driver xscale_pmu_driver = {
	.driver		= {
		.name	= "xscale-pmu",
	},
	.probe		= xscale_pmu_device_probe,
};

static int __init register_xscale_pmu_driver(void)
{
	return platform_driver_register(&xscale_pmu_driver);
}
device_initcall(register_xscale_pmu_driver);
#endif /* CONFIG_CPU_XSCALE */