/**
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
extern atomic_t multiplex_counter;
#endif

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */

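/*
 * op_x86_get_ctrl() assembles the 64-bit event-select value for one
 * counter: event code in bits 0-7 (bits 8-11 of extended event codes
 * go to bits 32-35), unit mask in bits 8-15, and the INT/USR/OS enable
 * flags as selected by the counter's configuration.
 */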
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;
	val |= (event & 0x0F00) << 24;

	return val;
}

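/*
 * NMI die-notifier callback: on a performance counter NMI, hand the
 * interrupted registers to the model-specific handler so it can
 * account the sample and re-arm the counters.
 */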
static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	int cpu = smp_processor_id();

	switch (val) {
	case DIE_NMI:
		model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
		ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

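/*
 * Save the current counter and control MSR contents on this CPU so
 * they can be restored when profiling is shut down.
 */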
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			rdmsrl(counters[i].addr, counters[i].saved);
	}

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			rdmsrl(controls[i].addr, controls[i].saved);
	}
}

static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->start(msrs);
}

static int nmi_start(void)
{
	on_each_cpu(nmi_cpu_start, NULL, 1);
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->stop(msrs);
}

static void nmi_stop(void)
{
	on_each_cpu(nmi_cpu_stop, NULL, 1);
}

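/*
 * Event multiplexing lets the user configure more events than there
 * are physical counters.  The virtual counters are divided into sets
 * the size of the physical counter bank; nmi_cpu_switch() below
 * rotates through the sets, saving and restoring counter state at
 * each switch.
 */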
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

inline int op_x86_phys_to_virt(int phys)
{
	return __get_cpu_var(switch_index) + phys;
}

static void nmi_shutdown_mux(void)
{
	int i;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).multiplex);
		per_cpu(cpu_msrs, i).multiplex = NULL;
		per_cpu(switch_index, i) = 0;
	}
}

static int nmi_setup_mux(void)
{
	size_t multiplex_size =
		sizeof(struct op_msr) * model->num_virt_counters;
	int i;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).multiplex =
			kmalloc(multiplex_size, GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).multiplex)
			return 0;
	}

	return 1;
}

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
	int i;
	struct op_msr *multiplex = msrs->multiplex;

	for (i = 0; i < model->num_virt_counters; ++i) {
		if (counter_config[i].enabled) {
			multiplex[i].saved = -(u64)counter_config[i].count;
		} else {
			multiplex[i].addr  = 0;
			multiplex[i].saved = 0;
		}
	}

	per_cpu(switch_index, cpu) = 0;
}

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (multiplex[virt].addr)
			rdmsrl(multiplex[virt].addr, multiplex[virt].saved);
	}
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
	struct op_msr *multiplex = msrs->multiplex;
	int i;

	for (i = 0; i < model->num_counters; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (multiplex[virt].addr)
			wrmsrl(multiplex[virt].addr, multiplex[virt].saved);
	}
}

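/*
 * Per-CPU rotation: stop the counters, save the state of the current
 * set, advance switch_index to the next set (wrapping back to 0 at the
 * end or at the first unused slot), reprogram the controls, restart.
 */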
static void nmi_cpu_switch(void *dummy)
{
	int cpu = smp_processor_id();
	int si = per_cpu(switch_index, cpu);
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	nmi_cpu_stop(NULL);
	nmi_cpu_save_mpx_registers(msrs);

	/* move to next set */
	si += model->num_counters;
	if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
		per_cpu(switch_index, cpu) = 0;
	else
		per_cpu(switch_index, cpu) = si;

	model->switch_ctrl(model, msrs);
	nmi_cpu_restore_mpx_registers(msrs);

	nmi_cpu_start(NULL);
}

/*
 * Quick check to see if multiplexing is necessary.  The check should
 * be sufficient since counters are used in order: if the counter
 * slot following the last physical counter has a nonzero count, more
 * events were configured than the hardware can hold at once.
 */
static int nmi_multiplex_on(void)
{
	return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
	if (!model->switch_ctrl)
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	on_each_cpu(nmi_cpu_switch, NULL, 1);

	atomic_inc(&multiplex_counter);

	return 0;
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }

#endif /* CONFIG_OPROFILE_EVENT_MULTIPLEX */

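/*
 * The counter and control MSR shadow arrays are sized from the model
 * and allocated for every possible CPU, so a CPU that comes online
 * later finds its buffers already in place.
 */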
static void free_msrs(void)
{
	int i;

	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
}

static int allocate_msrs(void)
{
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
	int i;

	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters)
			return 0;
		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls)
			return 0;
	}

	return 1;
}

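/*
 * Runs on each CPU: save the current MSR state, let the model program
 * its control registers, and point the local APIC's performance
 * counter vector at NMI delivery.
 */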
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	nmi_cpu_save_registers(msrs);
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(model, msrs);
	nmi_cpu_setup_mux(cpu, msrs);
	spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
};

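/*
 * nmi_setup() allocates the per-CPU MSR buffers, registers the NMI
 * die-notifier, copies CPU 0's MSR addresses to all other CPUs (the
 * counter layout is assumed identical across CPUs), and then programs
 * every CPU via nmi_cpu_setup().
 */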
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		err = -ENOMEM;
	else if (!nmi_setup_mux())
		err = -ENOMEM;
	else
		err = register_die_notifier(&profile_exceptions_nb);

	if (err) {
		free_msrs();
		nmi_shutdown_mux();
		return err;
	}

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs used for the save and setup operations are distinct.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
		memcpy(per_cpu(cpu_msrs, cpu).multiplex,
		       per_cpu(cpu_msrs, 0).multiplex,
		       sizeof(struct op_msr) * model->num_virt_counters);
#endif
	}
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	nmi_enabled = 1;
	return 0;
}

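/*
 * Undo nmi_cpu_save_registers(): write back the saved control MSRs
 * first, then the counter MSRs.
 */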
static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < model->num_controls; ++i) {
		if (controls[i].addr)
			wrmsrl(controls[i].addr, controls[i].saved);
	}

	for (i = 0; i < model->num_counters; ++i) {
		if (counters[i].addr)
			wrmsrl(counters[i].addr, counters[i].saved);
	}
}

static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

	/* Restoring APIC_LVTPC can trigger an APIC error because the
	 * delivery mode and vector number combination can be illegal.
	 * That's by design: at power-on the APIC LVT entries contain a
	 * zero vector number, which is legal only for NMI delivery
	 * mode.  So inhibit APIC errors before restoring the LVTPC.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_cpu_restore_registers(msrs);
}

static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	nmi_shutdown_mux();
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}

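/*
 * Populate the oprofilefs tree: one numbered directory per (virtual)
 * counter, each holding the enabled/event/count/unit_mask/kernel/user
 * control files.
 */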
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_virt_counters; ++i) {
		struct dentry *dir;
		char buf[4];

#ifndef CONFIG_OPROFILE_EVENT_MULTIPLEX
		/* quick little hack to _not_ expose a counter if it is not
		 * available for use.  This should protect userspace apps.
		 * NOTE: assumes 1:1 mapping here (that counters are organized
		 * sequentially in their struct assignment).
		 */
		if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
			continue;
#endif /* CONFIG_OPROFILE_EVENT_MULTIPLEX */

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
	}

	return 0;
}

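/*
 * CPU hotplug: start counters on a CPU that comes (back) online and
 * stop them on one that is about to go down.
 */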
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;
	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};

#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static int nmi_resume(struct sys_device *dev)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
	return 0;
}

static struct sysdev_class oprofile_sysclass = {
	.name		= "oprofile",
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static struct sys_device device_oprofile = {
	.id	= 0,
	.cls	= &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
	int error;

	error = sysdev_class_register(&oprofile_sysclass);
	if (!error)
		error = sysdev_register(&device_oprofile);
	return error;
}

static void exit_sysfs(void)
{
	sysdev_unregister(&device_oprofile);
	sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */

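/*
 * Pentium 4 setup: pick the P4 model variant from the SMT sibling
 * count; with more than two hyper-threads per core there is no usable
 * counter assignment and oprofile reverts to timer mode.
 */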
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model > 6 || cpu_model == 5)
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}

static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
	if (!strcmp(str, "arch_perfmon")) {
		force_arch_perfmon = 1;
		printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
	}

	return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);

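/*
 * P6-family setup: map the CPU model number to an oprofile cpu_type
 * string.  Core i7 uses the architectural perfmon spec rather than the
 * classic PPro one.
 */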
static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;
	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */

	if (force_arch_perfmon && cpu_has_arch_perfmon)
		return 0;

	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
	case 10 ... 11:
		*cpu_type = "i386/piii";
		break;
	case 9:
	case 13:
		*cpu_type = "i386/p6_mobile";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 15: case 23:
		*cpu_type = "i386/core_2";
		break;
	case 26:
		spec = &op_arch_perfmon_spec;
		*cpu_type = "i386/core_i7";
		break;
	case 28:
		*cpu_type = "i386/atom";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = spec;
	return 1;
}

/* in order to get sysfs right */
static int using_nmi;

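/*
 * Entry point: detect the CPU vendor and family, select the matching
 * op_x86_model_spec, and wire the NMI-based callbacks into the
 * oprofile_operations structure.
 */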
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
		/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

		/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	register_cpu_notifier(&oprofile_cpu_nb);

	/* default values, can be overwritten by model */
	ops->create_files	= nmi_create_files;
	ops->setup		= nmi_setup;
	ops->shutdown		= nmi_shutdown;
	ops->start		= nmi_start;
	ops->stop		= nmi_stop;
	ops->cpu_type		= cpu_type;
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	ops->switch_events	= nmi_switch_event;
#endif

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	init_sysfs();
	using_nmi = 1;
	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}

void op_nmi_exit(void)
{
	if (using_nmi) {
		exit_sysfs();
		unregister_cpu_notifier(&oprofile_cpu_nb);
	}
	if (model->exit)
		model->exit();
}