x86/oprofile: Minor changes in op_model_athlon.c
arch/x86/oprofile/nmi_int.c
/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2008 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec const *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

static int nmi_start(void);
static void nmi_stop(void);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;

#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	if (nmi_enabled == 1)
		nmi_stop();
	return 0;
}

static int nmi_resume(struct sys_device *dev)
{
	if (nmi_enabled == 1)
		nmi_start();
	return 0;
}

static struct sysdev_class oprofile_sysclass = {
	.name		= "oprofile",
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static struct sys_device device_oprofile = {
	.id	= 0,
	.cls	= &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
	int error;

	error = sysdev_class_register(&oprofile_sysclass);
	if (!error)
		error = sysdev_register(&device_oprofile);
	return error;
}

static void exit_sysfs(void)
{
	sysdev_unregister(&device_oprofile);
	sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */

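/*
 * Die-notifier callback: runs for every NMI on the CPU that took it.
 * The model's check_ctrs() samples and restarts any overflowed
 * counters; if it handled the NMI we return NOTIFY_STOP so no other
 * handler treats it as an error.
 */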
static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	int cpu = smp_processor_id();

	switch (val) {
	case DIE_NMI:
		if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

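/*
 * Snapshot the current counter and control MSR values for this CPU so
 * that nmi_restore_registers() can put them back at shutdown.
 */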
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			rdmsr(counters[i].addr,
				counters[i].saved.low,
				counters[i].saved.high);
		}
	}

	for (i = 0; i < nr_ctrls; ++i) {
		if (controls[i].addr) {
			rdmsr(controls[i].addr,
				controls[i].saved.low,
				controls[i].saved.high);
		}
	}
}

static void nmi_save_registers(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	nmi_cpu_save_registers(msrs);
}

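/*
 * Free the per-CPU MSR shadow arrays. kfree(NULL) is a no-op, so this
 * is also safe after a partial allocation failure.
 */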
static void free_msrs(void)
{
	int i;
	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
}

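/*
 * Allocate shadow arrays for the model's counter and control MSRs on
 * every possible CPU. Returns 1 on success and 0 on failure; on
 * failure everything allocated so far is freed again.
 */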
static int allocate_msrs(void)
{
	int success = 1;
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters) {
			success = 0;
			break;
		}
		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls) {
			success = 0;
			break;
		}
	}

	if (!success)
		free_msrs();

	return success;
}

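/*
 * Per-CPU IPI callback: program the counter/control registers for this
 * CPU and switch the local APIC performance counter interrupt to NMI
 * delivery mode. oprofilefs_lock keeps the counter configuration from
 * changing underneath setup_ctrs().
 */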
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(msrs);
	spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
	.next = NULL,
	.priority = 0
};

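/*
 * Set up NMI profiling: allocate the MSR shadows, hook into the die
 * chain, then save the original MSR state and program the counters on
 * every CPU.
 */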
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	err = register_die_notifier(&profile_exceptions_nb);
	if (err) {
		free_msrs();
		return err;
	}

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	for_each_possible_cpu(cpu) {
		if (cpu != 0) {
			memcpy(per_cpu(cpu_msrs, cpu).counters,
				per_cpu(cpu_msrs, 0).counters,
				sizeof(struct op_msr) * model->num_counters);

			memcpy(per_cpu(cpu_msrs, cpu).controls,
				per_cpu(cpu_msrs, 0).controls,
				sizeof(struct op_msr) * model->num_controls);
		}
	}
	on_each_cpu(nmi_save_registers, NULL, 1);
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	nmi_enabled = 1;
	return 0;
}

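/*
 * Write back the MSR values captured by nmi_cpu_save_registers();
 * controls are restored before counters.
 */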
static void nmi_restore_registers(struct op_msrs *msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrls; ++i) {
		if (controls[i].addr) {
			wrmsr(controls[i].addr,
				controls[i].saved.low,
				controls[i].saved.high);
		}
	}

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			wrmsr(counters[i].addr,
				counters[i].saved.low,
				counters[i].saved.high);
		}
	}
}

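/*
 * Per-CPU IPI callback: put the saved LVTPC entry and MSR state back
 * the way we found them.
 */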
static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);

	/* Restoring APIC_LVTPC can trigger an APIC error because the
	 * delivery mode and vector number combination can be illegal.
	 * That's by design: on power-on the APIC LVT entries contain a
	 * zero vector number, which is legal only for NMI delivery
	 * mode. So mask APIC errors before restoring the LVTPC.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_restore_registers(msrs);
}

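/* Tear down in roughly the reverse order of nmi_setup(). */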
static void nmi_shutdown(void)
{
	struct op_msrs *msrs = &get_cpu_var(cpu_msrs);
	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}

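/*
 * Starting and stopping just fan out to every CPU, where the model
 * enables or disables its counters.
 */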
static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->start(msrs);
}

static int nmi_start(void)
{
	on_each_cpu(nmi_cpu_start, NULL, 1);
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->stop(msrs);
}

static void nmi_stop(void)
{
	on_each_cpu(nmi_cpu_stop, NULL, 1);
}

struct op_counter_config counter_config[OP_MAX_COUNTER];

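/*
 * Populate oprofilefs: one numbered directory per usable counter, each
 * holding the enabled/event/count/unit_mask/kernel/user control files.
 */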
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect userspace
		 * applications.
		 * NOTE: assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
	}

	return 0;
}

static int p4force;
module_param(p4force, int, 0);

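/*
 * Detect Pentium 4 support. p4force=1 overrides the model check; on
 * SMP kernels the HT-aware model is chosen when two siblings share a
 * core, and more than two siblings falls back to timer mode.
 */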
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (!p4force && (cpu_model > 6 || cpu_model == 5))
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}

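/*
 * Map P6-family model numbers to oprofile cpu_type strings; returns 0
 * for unknown models so the caller can bail out with -ENODEV.
 */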
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
		*cpu_type = "i386/piii";
		break;
	case 9:
		*cpu_type = "i386/p6_mobile";
		break;
	case 10 ... 13:
		*cpu_type = "i386/p6";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 15: case 23:
		*cpu_type = "i386/core_2";
		break;
	case 26:
		*cpu_type = "i386/core_2";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = &op_ppro_spec;
	return 1;
}

/* in order to get sysfs right */
static int using_nmi;

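/*
 * Entry point from the oprofile core: pick a model-specific driver by
 * CPU vendor and family, run its optional init hook, and wire the NMI
 * callbacks into the oprofile_operations table.
 */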
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		default:
			return -ENODEV;
		case 6:
			model = &op_athlon_spec;
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			model = &op_athlon_spec;
			/* Actually it could be i386/hammer too, but give
			   user space a consistent name. */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			model = &op_athlon_spec;
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			model = &op_athlon_spec;
			cpu_type = "x86-64/family11h";
			break;
		}
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
		/* Pentium IV */
		case 0xf:
			if (!p4_init(&cpu_type))
				return -ENODEV;
			break;

		/* A P6-class processor */
		case 6:
			if (!ppro_init(&cpu_type))
				return -ENODEV;
			break;

		default:
			return -ENODEV;
		}
		break;

	default:
		return -ENODEV;
	}

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	init_sysfs();
	using_nmi = 1;
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;
	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}

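/* Undo init_sysfs() and give the model a chance to clean up. */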
void op_nmi_exit(void)
{
	if (using_nmi)
		exit_sysfs();
	if (model->exit)
		model->exit();
}