/**
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec const *model;
static struct op_msrs cpu_msrs[NR_CPUS];
static unsigned long saved_lvtpc[NR_CPUS];

static int nmi_start(void);
static void nmi_stop(void);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;

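/*
 * Power management: if profiling is active, stop it across a suspend
 * and restart it on resume.
 */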
#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	if (nmi_enabled == 1)
		nmi_stop();
	return 0;
}

static int nmi_resume(struct sys_device *dev)
{
	if (nmi_enabled == 1)
		nmi_start();
	return 0;
}

static struct sysdev_class oprofile_sysclass = {
	set_kset_name("oprofile"),
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static struct sys_device device_oprofile = {
	.id	= 0,
	.cls	= &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
	int error;
	if (!(error = sysdev_class_register(&oprofile_sysclass)))
		error = sysdev_register(&device_oprofile);
	return error;
}

static void exit_sysfs(void)
{
	sysdev_unregister(&device_oprofile);
	sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */

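/*
 * Runs on every NMI via the die notifier chain.  The active model
 * checks its counters; if one of them overflowed, the NMI was ours
 * and NOTIFY_STOP ends further processing of the exception.
 */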
static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	int cpu = smp_processor_id();

	switch (val) {
	case DIE_NMI:
		if (model->check_ctrs(args->regs, &cpu_msrs[cpu]))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

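/*
 * Read every counter and control MSR the model knows about into the
 * per-CPU shadow copy, so the original values can be put back when
 * profiling shuts down.
 */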
static void nmi_cpu_save_registers(struct op_msrs * msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr * counters = msrs->counters;
	struct op_msr * controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			rdmsr(counters[i].addr,
			      counters[i].saved.low,
			      counters[i].saved.high);
		}
	}

	for (i = 0; i < nr_ctrls; ++i) {
		if (controls[i].addr) {
			rdmsr(controls[i].addr,
			      controls[i].saved.low,
			      controls[i].saved.high);
		}
	}
}

static void nmi_save_registers(void * dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs * msrs = &cpu_msrs[cpu];
	nmi_cpu_save_registers(msrs);
}

static void free_msrs(void)
{
	int i;
	for_each_possible_cpu(i) {
		kfree(cpu_msrs[i].counters);
		cpu_msrs[i].counters = NULL;
		kfree(cpu_msrs[i].controls);
		cpu_msrs[i].controls = NULL;
	}
}

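/*
 * Allocate the per-CPU MSR shadow arrays.  Returns nonzero on success;
 * any partial allocation is released via free_msrs() on failure.
 */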
static int allocate_msrs(void)
{
	int success = 1;
	int i;
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	for_each_possible_cpu(i) {
		cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
		if (!cpu_msrs[i].counters) {
			success = 0;
			break;
		}
		cpu_msrs[i].controls = kmalloc(controls_size, GFP_KERNEL);
		if (!cpu_msrs[i].controls) {
			success = 0;
			break;
		}
	}

	if (!success)
		free_msrs();

	return success;
}

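/*
 * Per-CPU setup: program the counters (serialized against oprofilefs
 * writes via oprofilefs_lock) and switch the local APIC performance
 * counter vector to NMI delivery, saving the previous LVTPC value.
 */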
static void nmi_cpu_setup(void * dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs * msrs = &cpu_msrs[cpu];
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(msrs);
	spin_unlock(&oprofilefs_lock);
	saved_lvtpc[cpu] = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
};

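/*
 * Global setup: allocate the shadow MSRs, hook the die notifier, fill
 * in the MSR addresses once on CPU 0, copy them to every other CPU,
 * then save and program the hardware on each CPU.
 */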
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	if ((err = register_die_notifier(&profile_exceptions_nb))) {
		free_msrs();
		return err;
	}

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&cpu_msrs[0]);
	for_each_possible_cpu(cpu) {
		if (cpu != 0) {
			memcpy(cpu_msrs[cpu].counters, cpu_msrs[0].counters,
			       sizeof(struct op_msr) * model->num_counters);

			memcpy(cpu_msrs[cpu].controls, cpu_msrs[0].controls,
			       sizeof(struct op_msr) * model->num_controls);
		}
	}
	on_each_cpu(nmi_save_registers, NULL, 0, 1);
	on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
	nmi_enabled = 1;
	return 0;
}

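/* Write back the control and counter MSR values saved at setup time. */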
static void nmi_restore_registers(struct op_msrs * msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr * counters = msrs->counters;
	struct op_msr * controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrls; ++i) {
		if (controls[i].addr) {
			wrmsr(controls[i].addr,
			      controls[i].saved.low,
			      controls[i].saved.high);
		}
	}

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			wrmsr(counters[i].addr,
			      counters[i].saved.low,
			      counters[i].saved.high);
		}
	}
}

static void nmi_cpu_shutdown(void * dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs * msrs = &cpu_msrs[cpu];

	/* Restoring APIC_LVTPC can trigger an APIC error because the
	 * delivery mode and vector number combination can be illegal.
	 * That's by design: at power-on the APIC LVT entries contain a
	 * zero vector number, which is legal only for NMI delivery
	 * mode.  So inhibit APIC errors before restoring the LVTPC.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
	apic_write(APIC_LVTERR, v);
	nmi_restore_registers(msrs);
}

static void nmi_shutdown(void)
{
	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	model->shutdown(cpu_msrs);
	free_msrs();
}

static void nmi_cpu_start(void * dummy)
{
	struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
	model->start(msrs);
}

static int nmi_start(void)
{
	on_each_cpu(nmi_cpu_start, NULL, 0, 1);
	return 0;
}

static void nmi_cpu_stop(void * dummy)
{
	struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
	model->stop(msrs);
}

static void nmi_stop(void)
{
	on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
}

struct op_counter_config counter_config[OP_MAX_COUNTER];

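/*
 * Build the per-counter oprofilefs directories; each file is backed
 * directly by a field of counter_config[].
 */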
static int nmi_create_files(struct super_block * sb, struct dentry * root)
{
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		struct dentry * dir;
		char buf[4];

		/* Quick little hack to _not_ expose a counter if it is not
		 * available for use.  This protects the userspace
		 * application.
		 * NOTE: assumes a 1:1 mapping here (that counters are
		 *       organized sequentially in their struct assignment).
		 */
		if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
	}

	return 0;
}

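/*
 * P4 detection: models 5 and > 6 are unsupported unless the p4force
 * module parameter is set; with HyperThreading only up to two siblings
 * per core are handled.
 */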
static int p4force;
module_param(p4force, int, 0);

static int __init p4_init(char ** cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (!p4force && (cpu_model > 6 || cpu_model == 5))
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}

static int __init ppro_init(char ** cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model == 14)
		*cpu_type = "i386/core";
	else if (cpu_model == 15)
		*cpu_type = "i386/core_2";
	else if (cpu_model > 0xd)
		return 0;
	else if (cpu_model == 9) {
		*cpu_type = "i386/p6_mobile";
	} else if (cpu_model > 5) {
		*cpu_type = "i386/piii";
	} else if (cpu_model > 2) {
		*cpu_type = "i386/pii";
	} else {
		*cpu_type = "i386/ppro";
	}

	model = &op_ppro_spec;
	return 1;
}

/* in order to get sysfs right */
static int using_nmi;

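/*
 * Entry point from the generic oprofile code: choose a counter model
 * from the CPU vendor/family and fill in the oprofile_operations.
 */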
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */
		switch (family) {
		default:
			return -ENODEV;
		case 6:
			model = &op_athlon_spec;
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			model = &op_athlon_spec;
			/* Actually it could be i386/hammer too, but give
			   user space a consistent name. */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			model = &op_athlon_spec;
			cpu_type = "x86-64/family10";
			break;
		}
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
		/* Pentium IV */
		case 0xf:
			if (!p4_init(&cpu_type))
				return -ENODEV;
			break;

		/* A P6-class processor */
		case 6:
			if (!ppro_init(&cpu_type))
				return -ENODEV;
			break;

		default:
			return -ENODEV;
		}
		break;

	default:
		return -ENODEV;
	}

	init_sysfs();
	using_nmi = 1;
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;
	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}

void op_nmi_exit(void)
{
	if (using_nmi)
		exit_sysfs();
}