arch/arm/kernel/perf_event_cpu.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt) "CPU PMU: " fmt

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

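/*
 * Per-CPU PMU state: each CPU gets its own event pointer array, counter
 * used_mask and pmu_hw_events structure. cpu_pmu_init() below wires the
 * first two into the third and points percpu_pmu at the active arm_pmu.
 */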
static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
        if (!cpu_pmu)
                return NULL;

        return cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
        int max_events = 0;

        if (cpu_pmu != NULL)
                max_events = cpu_pmu->num_events;

        return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);
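
/*
 * Illustrative use of the two hooks above (an assumed caller sketch, not
 * code from this file): a profiler backend can size its buffers from them.
 *
 *      int nr_counters = perf_num_counters();
 *      pr_info("%s: %d counters\n", perf_pmu_name(), nr_counters);
 */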

/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"

static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
{
        return this_cpu_ptr(&cpu_hw_events);
}

static void cpu_pmu_enable_percpu_irq(void *data)
{
        struct arm_pmu *cpu_pmu = data;
        struct platform_device *pmu_device = cpu_pmu->plat_device;
        int irq = platform_get_irq(pmu_device, 0);

        enable_percpu_irq(irq, IRQ_TYPE_NONE);
        cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
        struct arm_pmu *cpu_pmu = data;
        struct platform_device *pmu_device = cpu_pmu->plat_device;
        int irq = platform_get_irq(pmu_device, 0);

        cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
        disable_percpu_irq(irq);
}
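
/*
 * On GIC-based systems the PMU may be wired up as a per-CPU interrupt
 * (PPI), where a single IRQ number covers every core. Such an IRQ is
 * requested once with request_percpu_irq() and then enabled or disabled
 * locally on each core by running the two helpers above via on_each_cpu().
 */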

static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
        int i, irq, irqs;
        struct platform_device *pmu_device = cpu_pmu->plat_device;

        irqs = min(pmu_device->num_resources, num_possible_cpus());

        irq = platform_get_irq(pmu_device, 0);
        if (irq >= 0 && irq_is_percpu(irq)) {
                on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
                free_percpu_irq(irq, &percpu_pmu);
        } else {
                for (i = 0; i < irqs; ++i) {
                        if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
                                continue;
                        irq = platform_get_irq(pmu_device, i);
                        if (irq >= 0)
                                free_irq(irq, cpu_pmu);
                }
        }
}

static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
        int i, err, irq, irqs;
        struct platform_device *pmu_device = cpu_pmu->plat_device;

        if (!pmu_device)
                return -ENODEV;

        irqs = min(pmu_device->num_resources, num_possible_cpus());
        if (irqs < 1) {
                printk_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
                return 0;
        }

        irq = platform_get_irq(pmu_device, 0);
        if (irq >= 0 && irq_is_percpu(irq)) {
                err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
                                irq);
                        return err;
                }
                on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
        } else {
                for (i = 0; i < irqs; ++i) {
                        err = 0;
                        irq = platform_get_irq(pmu_device, i);
                        if (irq < 0)
                                continue;

                        /*
                         * If we have a single PMU interrupt that we can't shift,
                         * assume that we're running on a uniprocessor machine and
                         * continue. Otherwise, continue without this interrupt.
                         */
                        if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
                                pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                            irq, i);
                                continue;
                        }

                        err = request_irq(irq, handler,
                                          IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
                                          cpu_pmu);
                        if (err) {
                                pr_err("unable to request IRQ%d for ARM PMU counters\n",
                                        irq);
                                return err;
                        }

                        cpumask_set_cpu(i, &cpu_pmu->active_irqs);
                }
        }

        return 0;
}
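
/*
 * In the else-branch above (one SPI per core), platform resource i is
 * treated as the interrupt routed to CPU i: its affinity is pinned with
 * irq_set_affinity(irq, cpumask_of(i)) before the IRQ is requested.
 */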

static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
        int cpu;
        for_each_possible_cpu(cpu) {
                struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
                events->events = per_cpu(hw_events, cpu);
                events->used_mask = per_cpu(used_mask, cpu);
                raw_spin_lock_init(&events->pmu_lock);
                per_cpu(percpu_pmu, cpu) = cpu_pmu;
        }

        cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
        cpu_pmu->request_irq = cpu_pmu_request_irq;
        cpu_pmu->free_irq = cpu_pmu_free_irq;

        /* Ensure the PMU has sane values out of reset. */
        if (cpu_pmu->reset)
                on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);

        /* If no interrupts available, set the corresponding capability flag */
        if (!platform_get_irq(cpu_pmu->plat_device, 0))
                cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
                          void *hcpu)
{
        if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
                return NOTIFY_DONE;

        if (cpu_pmu && cpu_pmu->reset)
                cpu_pmu->reset(cpu_pmu);
        else
                return NOTIFY_DONE;

        return NOTIFY_OK;
}

static struct notifier_block cpu_pmu_hotplug_notifier = {
        .notifier_call = cpu_pmu_notify,
};

/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id cpu_pmu_of_device_ids[] = {
        {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init},
        {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init},
        {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init},
        {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init},
        {.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init},
        {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init},
        {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init},
        {.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init},
        {.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
        {.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
        {.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
        {},
};
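
/*
 * Illustrative devicetree node matched by the table above (an assumed
 * example, not taken from this file): a dual-core Cortex-A9 with one
 * SPI per core:
 *
 *      pmu {
 *              compatible = "arm,cortex-a9-pmu";
 *              interrupts = <0 100 4>, <0 101 4>;
 *      };
 */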

static struct platform_device_id cpu_pmu_plat_device_ids[] = {
        {.name = "arm-pmu"},
        {.name = "armv6-pmu"},
        {.name = "armv7-pmu"},
        {.name = "xscale-pmu"},
        {},
};

/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu)
{
        int cpu = get_cpu();
        int ret = -ENODEV;

        pr_info("probing PMU on CPU %d\n", cpu);

        switch (read_cpuid_part()) {
        /* ARM Ltd CPUs. */
        case ARM_CPU_PART_ARM1136:
                ret = armv6_1136_pmu_init(pmu);
                break;
        case ARM_CPU_PART_ARM1156:
                ret = armv6_1156_pmu_init(pmu);
                break;
        case ARM_CPU_PART_ARM1176:
                ret = armv6_1176_pmu_init(pmu);
                break;
        case ARM_CPU_PART_ARM11MPCORE:
                ret = armv6mpcore_pmu_init(pmu);
                break;
        case ARM_CPU_PART_CORTEX_A8:
                ret = armv7_a8_pmu_init(pmu);
                break;
        case ARM_CPU_PART_CORTEX_A9:
                ret = armv7_a9_pmu_init(pmu);
                break;

        default:
                if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
                        switch (xscale_cpu_arch_version()) {
                        case ARM_CPU_XSCALE_ARCH_V1:
                                ret = xscale1pmu_init(pmu);
                                break;
                        case ARM_CPU_XSCALE_ARCH_V2:
                                ret = xscale2pmu_init(pmu);
                                break;
                        }
                }
                break;
        }

        put_cpu();
        return ret;
}
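
/*
 * probe_current_pmu() is the non-DT fallback: it decodes the part number
 * of the CPU the probe happens to run on (read_cpuid_part()) to choose a
 * backend. The get_cpu()/put_cpu() pair disables preemption so the probe
 * cannot migrate between cores while the ID registers are read.
 */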

static int cpu_pmu_device_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id;
        const int (*init_fn)(struct arm_pmu *);
        struct device_node *node = pdev->dev.of_node;
        struct arm_pmu *pmu;
        int ret = -ENODEV;

        if (cpu_pmu) {
                pr_info("attempt to register multiple PMU devices!");
                return -ENOSPC;
        }

        pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
        if (!pmu) {
                pr_info("failed to allocate PMU device!");
                return -ENOMEM;
        }

        cpu_pmu = pmu;
        cpu_pmu->plat_device = pdev;

        if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
                init_fn = of_id->data;
                ret = init_fn(pmu);
        } else {
                ret = probe_current_pmu(pmu);
        }

        if (ret) {
                pr_info("failed to probe PMU!");
                goto out_free;
        }

        cpu_pmu_init(cpu_pmu);
        ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);

        if (!ret)
                return 0;

out_free:
        pr_info("failed to register PMU devices!");
        kfree(pmu);
        return ret;
}
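
/*
 * Usage note (an assumption about the surrounding system, not code from
 * this file): once armpmu_register() succeeds the PMU is visible to the
 * perf core, so counters can be opened from userspace, e.g.
 * "perf stat -e cycles -- <cmd>" on the running machine.
 */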

static struct platform_driver cpu_pmu_driver = {
        .driver         = {
                .name   = "arm-pmu",
                .pm     = &armpmu_dev_pm_ops,
                .of_match_table = cpu_pmu_of_device_ids,
        },
        .probe          = cpu_pmu_device_probe,
        .id_table       = cpu_pmu_plat_device_ids,
};

static int __init register_pmu_driver(void)
{
        int err;

        err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
        if (err)
                return err;

        err = platform_driver_register(&cpu_pmu_driver);
        if (err)
                unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);

        return err;
}
device_initcall(register_pmu_driver);