x86, nmi: Create new NMI handler routines
arch/x86/oprofile/nmi_int.c

/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

259a83a8 30static struct op_x86_model_spec *model;
d18d00f5
MT
31static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
32static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
2fbe7b25 33
6ae56b55
RR
34/* must be protected with get_online_cpus()/put_online_cpus(): */
35static int nmi_enabled;
36static int ctr_running;
1da177e4 37
4d4036e0
JY
38struct op_counter_config counter_config[OP_MAX_COUNTER];
39
/* common functions */

u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
                    struct op_counter_config *counter_config)
{
        u64 val = 0;
        u16 event = (u16)counter_config->event;

        val |= ARCH_PERFMON_EVENTSEL_INT;
        val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
        val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
        val |= (counter_config->unit_mask & 0xFF) << 8;
        counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
                                  ARCH_PERFMON_EVENTSEL_EDGE |
                                  ARCH_PERFMON_EVENTSEL_CMASK);
        val |= counter_config->extra;
        event &= model->event_mask ? model->event_mask : 0xFF;
        val |= event & 0xFF;
        val |= (event & 0x0F00) << 24;

        return val;
}

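/*
 * Bit layout of the returned event-select value (architectural perfmon;
 * see the Intel SDM and AMD BKDG): bits 0-7 event select, bits 8-15
 * unit mask, plus the USR/OS/INT enable flags and the user-supplied
 * INV/EDGE/CMASK "extra" bits. The final (event & 0x0F00) << 24 shifts
 * the extended event-select bits 8-11 up to bits 32-35, where AMD's
 * extended event field lives; models with a wider event_mask use this.
 *
 * Illustrative example (not from the original source): event 0x76 with
 * unit_mask 0 and both user and kernel counting enabled yields
 * ARCH_PERFMON_EVENTSEL_INT | ARCH_PERFMON_EVENTSEL_USR |
 * ARCH_PERFMON_EVENTSEL_OS | 0x76.
 */
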
static int profile_exceptions_notify(struct notifier_block *self,
                                     unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        switch (val) {
        case DIE_NMI:
                if (ctr_running)
                        model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
                else if (!nmi_enabled)
                        break;
                else
                        model->stop(&__get_cpu_var(cpu_msrs));
                ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}

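/*
 * The three-way test above distinguishes who owns the NMI: with
 * counters running it is (potentially) ours, so collect samples; with
 * profiling fully disabled we return NOTIFY_DONE and let other
 * handlers see it; if we are enabled but stopped, a counter may still
 * deliver one late NMI, so silence it via model->stop() and swallow
 * the event with NOTIFY_STOP.
 */
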
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        rdmsrl(counters[i].addr, counters[i].saved);
        }

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        rdmsrl(controls[i].addr, controls[i].saved);
        }
}

static void nmi_cpu_start(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        if (!msrs->controls)
                WARN_ON_ONCE(1);
        else
                model->start(msrs);
}

static int nmi_start(void)
{
        get_online_cpus();
        ctr_running = 1;
        /* make ctr_running visible to the nmi handler: */
        smp_mb();
        on_each_cpu(nmi_cpu_start, NULL, 1);
        put_online_cpus();
        return 0;
}

static void nmi_cpu_stop(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        if (!msrs->controls)
                WARN_ON_ONCE(1);
        else
                model->stop(msrs);
}

static void nmi_stop(void)
{
        get_online_cpus();
        on_each_cpu(nmi_cpu_stop, NULL, 1);
        ctr_running = 0;
        put_online_cpus();
}

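/*
 * Ordering note: nmi_start() sets ctr_running *before* the smp_mb()
 * and the cross-CPU start, so profile_exceptions_notify() never sees
 * live counters while ctr_running == 0; nmi_stop() clears the flag
 * only after every CPU has stopped. get_online_cpus()/put_online_cpus()
 * keeps CPU hotplug away while the per-CPU state changes.
 */
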
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
        return !!model->switch_ctrl;
}

inline int op_x86_phys_to_virt(int phys)
{
        return __this_cpu_read(switch_index) + phys;
}

inline int op_x86_virt_to_phys(int virt)
{
        return virt % model->num_counters;
}

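/*
 * Multiplexing maps a larger set of "virtual" counters onto the few
 * physical ones by rotating switch_index in steps of num_counters.
 * Worked example (numbers are illustrative): with 4 physical and 8
 * virtual counters, switch_index alternates between 0 and 4, so
 * physical counter 1 backs virtual counter 1 in one phase and virtual
 * counter 5 in the other.
 */
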
static void nmi_shutdown_mux(void)
{
        int i;

        if (!has_mux())
                return;

        for_each_possible_cpu(i) {
                kfree(per_cpu(cpu_msrs, i).multiplex);
                per_cpu(cpu_msrs, i).multiplex = NULL;
                per_cpu(switch_index, i) = 0;
        }
}

static int nmi_setup_mux(void)
{
        size_t multiplex_size =
                sizeof(struct op_msr) * model->num_virt_counters;
        int i;

        if (!has_mux())
                return 1;

        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).multiplex =
                        kzalloc(multiplex_size, GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).multiplex)
                        return 0;
        }

        return 1;
}

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
        int i;
        struct op_msr *multiplex = msrs->multiplex;

        if (!has_mux())
                return;

        for (i = 0; i < model->num_virt_counters; ++i) {
                if (counter_config[i].enabled) {
                        multiplex[i].saved = -(u64)counter_config[i].count;
                } else {
                        multiplex[i].saved = 0;
                }
        }

        per_cpu(switch_index, cpu) = 0;
}

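/*
 * The preload value is -count because x86 performance counters count
 * upward and raise their interrupt on overflow: starting at -count
 * makes the NMI fire after exactly "count" events. Disabled virtual
 * counters are parked at 0.
 */
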
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *multiplex = msrs->multiplex;
        int i;

        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (counters[i].addr)
                        rdmsrl(counters[i].addr, multiplex[virt].saved);
        }
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *multiplex = msrs->multiplex;
        int i;

        for (i = 0; i < model->num_counters; ++i) {
                int virt = op_x86_phys_to_virt(i);
                if (counters[i].addr)
                        wrmsrl(counters[i].addr, multiplex[virt].saved);
        }
}

static void nmi_cpu_switch(void *dummy)
{
        int cpu = smp_processor_id();
        int si = per_cpu(switch_index, cpu);
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        nmi_cpu_stop(NULL);
        nmi_cpu_save_mpx_registers(msrs);

        /* move to next set */
        si += model->num_counters;
        if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
                per_cpu(switch_index, cpu) = 0;
        else
                per_cpu(switch_index, cpu) = si;

        model->switch_ctrl(model, msrs);
        nmi_cpu_restore_mpx_registers(msrs);

        nmi_cpu_start(NULL);
}

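/*
 * Switch sequence: stop the counters, bank the currently active
 * virtual set, advance switch_index by one physical bank (wrapping to
 * 0 once we pass the last configured virtual counter), reprogram the
 * controls via model->switch_ctrl, reload the new set and restart.
 * This runs on every CPU via on_each_cpu(), touching only local state.
 */
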
/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
        return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
        if (!has_mux())
                return -ENOSYS;         /* not implemented */
        if (nmi_multiplex_on() < 0)
                return -EINVAL;         /* not necessary */

        get_online_cpus();
        if (ctr_running)
                on_each_cpu(nmi_cpu_switch, NULL, 1);
        put_online_cpus();

        return 0;
}

static inline void mux_init(struct oprofile_operations *ops)
{
        if (has_mux())
                ops->switch_events = nmi_switch_event;
}

static void mux_clone(int cpu)
{
        if (!has_mux())
                return;

        memcpy(per_cpu(cpu_msrs, cpu).multiplex,
               per_cpu(cpu_msrs, 0).multiplex,
               sizeof(struct op_msr) * model->num_virt_counters);
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif

static void free_msrs(void)
{
        int i;
        for_each_possible_cpu(i) {
                kfree(per_cpu(cpu_msrs, i).counters);
                per_cpu(cpu_msrs, i).counters = NULL;
                kfree(per_cpu(cpu_msrs, i).controls);
                per_cpu(cpu_msrs, i).controls = NULL;
        }
        nmi_shutdown_mux();
}

static int allocate_msrs(void)
{
        size_t controls_size = sizeof(struct op_msr) * model->num_controls;
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;

        int i;
        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).counters)
                        goto fail;
                per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).controls)
                        goto fail;
        }

        if (!nmi_setup_mux())
                goto fail;

        return 1;

fail:
        free_msrs();
        return 0;
}

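/*
 * Buffers are allocated for every possible CPU, not just online ones,
 * so that a CPU hot-plugged later finds its MSR shadow arrays ready.
 * Any failure unwinds completely through free_msrs(), which also tears
 * down the multiplexing buffers.
 */
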
static void nmi_cpu_setup(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
        nmi_cpu_save_registers(msrs);
        spin_lock(&oprofilefs_lock);
        model->setup_ctrs(model, msrs);
        nmi_cpu_setup_mux(cpu, msrs);
        spin_unlock(&oprofilefs_lock);
        per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
        .notifier_call = profile_exceptions_notify,
        .next = NULL,
        .priority = NMI_LOCAL_LOW_PRIOR,
};

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        wrmsrl(controls[i].addr, controls[i].saved);
        }

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        wrmsrl(counters[i].addr, counters[i].saved);
        }
}

static void nmi_cpu_shutdown(void *dummy)
{
        unsigned int v;
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        /* Restoring APIC_LVTPC can trigger an APIC error because the
         * delivery mode and vector number combination can be illegal.
         * That's by design: on power-on the APIC LVT entries contain a
         * zero vector number, which is legal only for NMI delivery
         * mode. So inhibit APIC errors before restoring the LVTPC.
         */
        v = apic_read(APIC_LVTERR);
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
        nmi_cpu_restore_registers(msrs);
        if (model->cpu_down)
                model->cpu_down();
}

static void nmi_cpu_up(void *dummy)
{
        if (nmi_enabled)
                nmi_cpu_setup(dummy);
        if (ctr_running)
                nmi_cpu_start(dummy);
}

static void nmi_cpu_down(void *dummy)
{
        if (ctr_running)
                nmi_cpu_stop(dummy);
        if (nmi_enabled)
                nmi_cpu_shutdown(dummy);
}

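/*
 * nmi_cpu_up()/nmi_cpu_down() mirror each other for CPU hotplug: a CPU
 * coming online is set up and, if profiling is live, started; a CPU
 * going down is stopped first and then shut down, in reverse order.
 */
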
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
        unsigned int i;

        for (i = 0; i < model->num_virt_counters; ++i) {
                struct dentry *dir;
                char buf[4];

                /* quick little hack to _not_ expose a counter if it is not
                 * available for use. This should protect userspace apps.
                 * NOTE: assumes 1:1 mapping here (that counters are organized
                 * sequentially in their struct assignment).
                 */
                if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
                        continue;

                snprintf(buf, sizeof(buf), "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);
                oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
                oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
                oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
                oprofilefs_create_ulong(sb, dir, "extra", &counter_config[i].extra);
        }

        return 0;
}

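/*
 * Resulting oprofilefs layout, sketched for illustration (the
 * filesystem is typically mounted at /dev/oprofile):
 *
 *   0/enabled  0/event  0/count  0/unit_mask  0/kernel  0/user  0/extra
 *   1/...                          (one directory per virtual counter)
 */
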
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
                                 void *data)
{
        int cpu = (unsigned long)data;
        switch (action) {
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
                smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
        .notifier_call = oprofile_cpu_notifier
};

static int nmi_setup(void)
{
        int err = 0;
        int cpu;

        if (!allocate_msrs())
                return -ENOMEM;

        /* We need to serialize save and setup for HT because the subsets
         * of MSRs used for save and setup operations are distinct.
         */

        /* Assume saved/restored counters are the same on all CPUs */
        err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
        if (err)
                goto fail;

        for_each_possible_cpu(cpu) {
                if (!cpu)
                        continue;

                memcpy(per_cpu(cpu_msrs, cpu).counters,
                       per_cpu(cpu_msrs, 0).counters,
                       sizeof(struct op_msr) * model->num_counters);

                memcpy(per_cpu(cpu_msrs, cpu).controls,
                       per_cpu(cpu_msrs, 0).controls,
                       sizeof(struct op_msr) * model->num_controls);

                mux_clone(cpu);
        }

        nmi_enabled = 0;
        ctr_running = 0;
        /* make variables visible to the nmi handler: */
        smp_mb();
        err = register_die_notifier(&profile_exceptions_nb);
        if (err)
                goto fail;

        get_online_cpus();
        register_cpu_notifier(&oprofile_cpu_nb);
        nmi_enabled = 1;
        /* make nmi_enabled visible to the nmi handler: */
        smp_mb();
        on_each_cpu(nmi_cpu_setup, NULL, 1);
        put_online_cpus();

        return 0;
fail:
        free_msrs();
        return err;
}

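/*
 * Setup order matters here: the model fills in MSR addresses once on
 * CPU 0 and they are copied to all other CPUs (the addresses are the
 * same everywhere); nmi_enabled/ctr_running are published with an
 * smp_mb() before the die notifier is registered, so the NMI handler
 * never reads stale flags; only then is each CPU's hardware programmed.
 */
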
static void nmi_shutdown(void)
{
        struct op_msrs *msrs;

        get_online_cpus();
        unregister_cpu_notifier(&oprofile_cpu_nb);
        on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        nmi_enabled = 0;
        ctr_running = 0;
        put_online_cpus();
        /* make variables visible to the nmi handler: */
        smp_mb();
        unregister_die_notifier(&profile_exceptions_nb);
        msrs = &get_cpu_var(cpu_msrs);
        model->shutdown(msrs);
        free_msrs();
        put_cpu_var(cpu_msrs);
}

#ifdef CONFIG_PM

static int nmi_suspend(void)
{
        /* Only one CPU left, just stop that one */
        if (nmi_enabled == 1)
                nmi_cpu_stop(NULL);
        return 0;
}

static void nmi_resume(void)
{
        if (nmi_enabled == 1)
                nmi_cpu_start(NULL);
}

static struct syscore_ops oprofile_syscore_ops = {
        .resume = nmi_resume,
        .suspend = nmi_suspend,
};

static void __init init_suspend_resume(void)
{
        register_syscore_ops(&oprofile_syscore_ops);
}

static void exit_suspend_resume(void)
{
        unregister_syscore_ops(&oprofile_syscore_ops);
}

#else

static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }

#endif /* CONFIG_PM */

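/*
 * Syscore suspend/resume callbacks run late in the suspend path, with
 * only the boot CPU still online and interrupts disabled, which is why
 * stopping/starting just the local CPU is sufficient here.
 */
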
static int __init p4_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;

        if (cpu_model > 6 || cpu_model == 5)
                return 0;

#ifndef CONFIG_SMP
        *cpu_type = "i386/p4";
        model = &op_p4_spec;
        return 1;
#else
        switch (smp_num_siblings) {
        case 1:
                *cpu_type = "i386/p4";
                model = &op_p4_spec;
                return 1;

        case 2:
                *cpu_type = "i386/p4-ht";
                model = &op_p4_ht2_spec;
                return 1;
        }
#endif

        printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
        printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
        return 0;
}

static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
        if (!strcmp(str, "arch_perfmon")) {
                force_arch_perfmon = 1;
                printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
        }

        return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);

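/*
 * Usage sketch (an assumption based on module_param_call() semantics,
 * not stated in this file): the fallback can be requested on the
 * kernel command line, e.g.
 *
 *   oprofile.cpu_type=arch_perfmon
 *
 * which sets force_arch_perfmon and makes ppro_init() below bail out
 * so the generic architectural-perfmon model is used instead of a
 * model-specific one.
 */
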
static int __init ppro_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;
        struct op_x86_model_spec *spec = &op_ppro_spec;        /* default */

        if (force_arch_perfmon && cpu_has_arch_perfmon)
                return 0;

        /*
         * Documentation on identifying Intel processors by CPU family
         * and model can be found in the Intel Software Developer's
         * Manuals (SDM):
         *
         *  http://www.intel.com/products/processor/manuals/
         *
         * As of May 2010 the documentation for this was in the:
         * "Intel 64 and IA-32 Architectures Software Developer's
         * Manual Volume 3B: System Programming Guide", "Table B-1
         * CPUID Signature Values of DisplayFamily_DisplayModel".
         */
        switch (cpu_model) {
        case 0 ... 2:
                *cpu_type = "i386/ppro";
                break;
        case 3 ... 5:
                *cpu_type = "i386/pii";
                break;
        case 6 ... 8:
        case 10 ... 11:
                *cpu_type = "i386/piii";
                break;
        case 9:
        case 13:
                *cpu_type = "i386/p6_mobile";
                break;
        case 14:
                *cpu_type = "i386/core";
                break;
        case 0x0f:
        case 0x16:
        case 0x17:
        case 0x1d:
                *cpu_type = "i386/core_2";
                break;
        case 0x1a:
        case 0x1e:
        case 0x2e:
                spec = &op_arch_perfmon_spec;
                *cpu_type = "i386/core_i7";
                break;
        case 0x1c:
                *cpu_type = "i386/atom";
                break;
        default:
                /* Unknown */
                return 0;
        }

        model = spec;
        return 1;
}

int __init op_nmi_init(struct oprofile_operations *ops)
{
        __u8 vendor = boot_cpu_data.x86_vendor;
        __u8 family = boot_cpu_data.x86;
        char *cpu_type = NULL;
        int ret = 0;

        if (!cpu_has_apic)
                return -ENODEV;

        switch (vendor) {
        case X86_VENDOR_AMD:
                /* Needs to be at least an Athlon (or hammer in 32bit mode) */

                switch (family) {
                case 6:
                        cpu_type = "i386/athlon";
                        break;
                case 0xf:
                        /*
                         * Actually it could be i386/hammer too, but
                         * give user space a consistent name.
                         */
                        cpu_type = "x86-64/hammer";
                        break;
                case 0x10:
                        cpu_type = "x86-64/family10";
                        break;
                case 0x11:
                        cpu_type = "x86-64/family11h";
                        break;
                case 0x12:
                        cpu_type = "x86-64/family12h";
                        break;
                case 0x14:
                        cpu_type = "x86-64/family14h";
                        break;
                case 0x15:
                        cpu_type = "x86-64/family15h";
                        break;
                default:
                        return -ENODEV;
                }
                model = &op_amd_spec;
                break;

        case X86_VENDOR_INTEL:
                switch (family) {
                /* Pentium IV */
                case 0xf:
                        p4_init(&cpu_type);
                        break;

                /* A P6-class processor */
                case 6:
                        ppro_init(&cpu_type);
                        break;

                default:
                        break;
                }

                if (cpu_type)
                        break;

                if (!cpu_has_arch_perfmon)
                        return -ENODEV;

                /* use arch perfmon as fallback */
                cpu_type = "i386/arch_perfmon";
                model = &op_arch_perfmon_spec;
                break;

        default:
                return -ENODEV;
        }

        /* default values, can be overwritten by model */
        ops->create_files = nmi_create_files;
        ops->setup = nmi_setup;
        ops->shutdown = nmi_shutdown;
        ops->start = nmi_start;
        ops->stop = nmi_stop;
        ops->cpu_type = cpu_type;

        if (model->init)
                ret = model->init(ops);
        if (ret)
                return ret;

        if (!model->num_virt_counters)
                model->num_virt_counters = model->num_counters;

        mux_init(ops);

        init_suspend_resume();

        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
}

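/*
 * Rough lifecycle as driven by the oprofile core (a sketch, not part
 * of this file): op_nmi_init() picks a model and fills in ops; the
 * core then calls ops->create_files when oprofilefs is populated,
 * ops->setup and ops->start when profiling begins, ops->switch_events
 * periodically if multiplexing is active, and ops->stop/ops->shutdown
 * on the way back down.
 */
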
void op_nmi_exit(void)
{
        exit_suspend_resume();
}