x86/oprofile: Use per_cpu() instead of __get_cpu_var()
arch/x86/oprofile/nmi_int.c
/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
DEFINE_PER_CPU(int, switch_index);
#endif

static struct op_x86_model_spec const *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
extern atomic_t multiplex_counter;
#endif

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */

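/*
 * Build the raw event-select MSR value for one counter from its
 * configuration (event, unit mask, user/kernel filtering).
 */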
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
                    struct op_counter_config *counter_config)
{
        u64 val = 0;
        u16 event = (u16)counter_config->event;

        val |= ARCH_PERFMON_EVENTSEL_INT;
        val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
        val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
        val |= (counter_config->unit_mask & 0xFF) << 8;
        event &= model->event_mask ? model->event_mask : 0xFF;
        val |= event & 0xFF;
        val |= (event & 0x0F00) << 24;

        return val;
}

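/*
 * NMI handler registered on the die-notifier chain: pass the NMI to the
 * model's counter-overflow handler for the current CPU.
 */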
static int profile_exceptions_notify(struct notifier_block *self,
                                     unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;
        int cpu = smp_processor_id();

        switch (val) {
        case DIE_NMI:
        case DIE_NMI_IPI:
                model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu));
                ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}

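/*
 * Save the current counter and control MSR values so they can be restored
 * at shutdown.
 */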
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        rdmsrl(counters[i].addr, counters[i].saved);
        }

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        rdmsrl(controls[i].addr, controls[i].saved);
        }
}

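/*
 * Each CPU keeps shadow arrays of its counter and control MSRs (plus the
 * virtual counter slots when multiplexing is configured).
 */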
static void free_msrs(void)
{
        int i;
        for_each_possible_cpu(i) {
                kfree(per_cpu(cpu_msrs, i).counters);
                per_cpu(cpu_msrs, i).counters = NULL;
                kfree(per_cpu(cpu_msrs, i).controls);
                per_cpu(cpu_msrs, i).controls = NULL;

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
                kfree(per_cpu(cpu_msrs, i).multiplex);
                per_cpu(cpu_msrs, i).multiplex = NULL;
#endif
        }
}

static int allocate_msrs(void)
{
        int success = 1;
        size_t controls_size = sizeof(struct op_msr) * model->num_controls;
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
        size_t multiplex_size = sizeof(struct op_msr) * model->num_virt_counters;
#endif

        int i;
        for_each_possible_cpu(i) {
                per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).counters) {
                        success = 0;
                        break;
                }
                per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
                                                        GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).controls) {
                        success = 0;
                        break;
                }
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
                per_cpu(cpu_msrs, i).multiplex =
                        kmalloc(multiplex_size, GFP_KERNEL);
                if (!per_cpu(cpu_msrs, i).multiplex) {
                        success = 0;
                        break;
                }
#endif
        }

        if (!success)
                free_msrs();

        return success;
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

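/*
 * Initialize the multiplex slots: enabled virtual counters start at their
 * (negated) count value, disabled ones are cleared.
 */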
static void nmi_setup_cpu_mux(struct op_msrs const * const msrs)
{
        int i;
        struct op_msr *multiplex = msrs->multiplex;

        for (i = 0; i < model->num_virt_counters; ++i) {
                if (counter_config[i].enabled) {
                        multiplex[i].saved = -(u64)counter_config[i].count;
                } else {
                        multiplex[i].addr = 0;
                        multiplex[i].saved = 0;
                }
        }
}

#endif

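/*
 * Per-CPU setup, run via on_each_cpu(): save the current MSRs, program the
 * counters under oprofilefs_lock and route the local APIC performance
 * counter vector to NMI delivery.
 */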
static void nmi_cpu_setup(void *dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
        nmi_cpu_save_registers(msrs);
        spin_lock(&oprofilefs_lock);
        model->setup_ctrs(model, msrs);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
        nmi_setup_cpu_mux(msrs);
#endif
        spin_unlock(&oprofilefs_lock);
        per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
        .notifier_call = profile_exceptions_notify,
        .next = NULL,
        .priority = 2
};

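/*
 * Register the NMI die-notifier, fill in the MSR addresses on CPU 0,
 * replicate them to the other CPUs and then program every CPU.
 */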
static int nmi_setup(void)
{
        int err = 0;
        int cpu;

        if (!allocate_msrs())
                return -ENOMEM;

        err = register_die_notifier(&profile_exceptions_nb);
        if (err) {
                free_msrs();
                return err;
        }

        /* We need to serialize save and setup for HT because the subsets
         * of MSRs are distinct for the save and setup operations.
         */

        /* Assume saved/restored counters are the same on all CPUs */
        model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
        for_each_possible_cpu(cpu) {
                if (cpu != 0) {
                        memcpy(per_cpu(cpu_msrs, cpu).counters,
                               per_cpu(cpu_msrs, 0).counters,
                               sizeof(struct op_msr) * model->num_counters);

                        memcpy(per_cpu(cpu_msrs, cpu).controls,
                               per_cpu(cpu_msrs, 0).controls,
                               sizeof(struct op_msr) * model->num_controls);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
                        memcpy(per_cpu(cpu_msrs, cpu).multiplex,
                               per_cpu(cpu_msrs, 0).multiplex,
                               sizeof(struct op_msr) * model->num_virt_counters);
#endif
                }
        }
        on_each_cpu(nmi_cpu_setup, NULL, 1);
        nmi_enabled = 1;
        return 0;
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

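/*
 * Save/restore the hardware counters backing the currently active set of
 * virtual counters, selected by this CPU's switch_index.
 */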
static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
        unsigned int si = __get_cpu_var(switch_index);
        struct op_msr *multiplex = msrs->multiplex;
        unsigned int i;

        for (i = 0; i < model->num_counters; ++i) {
                int offset = i + si;
                if (multiplex[offset].addr) {
                        rdmsrl(multiplex[offset].addr,
                               multiplex[offset].saved);
                }
        }
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
        unsigned int si = __get_cpu_var(switch_index);
        struct op_msr *multiplex = msrs->multiplex;
        unsigned int i;

        for (i = 0; i < model->num_counters; ++i) {
                int offset = i + si;
                if (multiplex[offset].addr) {
                        wrmsrl(multiplex[offset].addr,
                               multiplex[offset].saved);
                }
        }
}

#endif

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
        struct op_msr *counters = msrs->counters;
        struct op_msr *controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < model->num_controls; ++i) {
                if (controls[i].addr)
                        wrmsrl(controls[i].addr, controls[i].saved);
        }

        for (i = 0; i < model->num_counters; ++i) {
                if (counters[i].addr)
                        wrmsrl(counters[i].addr, counters[i].saved);
        }
}

static void nmi_cpu_shutdown(void *dummy)
{
        unsigned int v;
        int cpu = smp_processor_id();
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        /* Restoring APIC_LVTPC can trigger an APIC error because the delivery
         * mode and vector number combination can be illegal. That's by design:
         * on power-on the APIC LVT entries contain a zero vector number, which
         * is legal only for NMI delivery mode. So inhibit APIC errors before
         * restoring the LVTPC.
         */
        v = apic_read(APIC_LVTERR);
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
        apic_write(APIC_LVTERR, v);
        nmi_cpu_restore_registers(msrs);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
        per_cpu(switch_index, cpu) = 0;
#endif
}

static void nmi_shutdown(void)
{
        struct op_msrs *msrs;

        nmi_enabled = 0;
        on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        unregister_die_notifier(&profile_exceptions_nb);
        msrs = &get_cpu_var(cpu_msrs);
        model->shutdown(msrs);
        free_msrs();
        put_cpu_var(cpu_msrs);
}

static void nmi_cpu_start(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        model->start(msrs);
}

static int nmi_start(void)
{
        on_each_cpu(nmi_cpu_start, NULL, 1);
        return 0;
}

static void nmi_cpu_stop(void *dummy)
{
        struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
        model->stop(msrs);
}

static void nmi_stop(void)
{
        on_each_cpu(nmi_cpu_stop, NULL, 1);
}

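/*
 * Create the per-counter oprofilefs control files: enabled, event, count,
 * unit_mask, kernel and user.
 */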
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
        unsigned int i;

        for (i = 0; i < model->num_virt_counters; ++i) {
                struct dentry *dir;
                char buf[4];

#ifndef CONFIG_OPROFILE_EVENT_MULTIPLEX
                /* quick little hack to _not_ expose a counter if it is not
                 * available for use. This should protect the userspace app.
                 * NOTE: assumes 1:1 mapping here (that counters are organized
                 * sequentially in their struct assignment).
                 */
                if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
                        continue;
#endif /* CONFIG_OPROFILE_EVENT_MULTIPLEX */

                snprintf(buf, sizeof(buf), "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);
                oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
                oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
                oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
        }

        return 0;
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

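/*
 * Rotate this CPU to the next set of virtual counters: stop, save the
 * active set, advance switch_index (wrapping to 0 when the next set is
 * out of range or unused), reprogram the hardware, restore and restart.
 */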
static void nmi_cpu_switch(void *dummy)
{
        int cpu = smp_processor_id();
        int si = per_cpu(switch_index, cpu);
        struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

        nmi_cpu_stop(NULL);
        nmi_cpu_save_mpx_registers(msrs);

        /* move to next set */
        si += model->num_counters;
        if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
                per_cpu(switch_index, cpu) = 0;
        else
                per_cpu(switch_index, cpu) = si;

        model->switch_ctrl(model, msrs);
        nmi_cpu_restore_mpx_registers(msrs);

        nmi_cpu_start(NULL);
}

/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
        return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
        if (!model->switch_ctrl)
                return -ENOSYS;         /* not implemented */
        if (nmi_multiplex_on() < 0)
                return -EINVAL;         /* not necessary */

        on_each_cpu(nmi_cpu_switch, NULL, 1);

        atomic_inc(&multiplex_counter);

        return 0;
}

#endif

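/*
 * CPU hotplug: start the counters on a CPU coming (back) online and stop
 * them before a CPU goes down.
 */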
#ifdef CONFIG_SMP
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
                                 void *data)
{
        int cpu = (unsigned long)data;
        switch (action) {
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
                smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
        .notifier_call = oprofile_cpu_notifier
};
#endif

#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        /* Only one CPU left, just stop that one */
        if (nmi_enabled == 1)
                nmi_cpu_stop(NULL);
        return 0;
}

static int nmi_resume(struct sys_device *dev)
{
        if (nmi_enabled == 1)
                nmi_cpu_start(NULL);
        return 0;
}

static struct sysdev_class oprofile_sysclass = {
        .name           = "oprofile",
        .resume         = nmi_resume,
        .suspend        = nmi_suspend,
};

static struct sys_device device_oprofile = {
        .id     = 0,
        .cls    = &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
        int error;

        error = sysdev_class_register(&oprofile_sysclass);
        if (!error)
                error = sysdev_register(&device_oprofile);
        return error;
}

static void exit_sysfs(void)
{
        sysdev_unregister(&device_oprofile);
        sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */

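/*
 * Select the Pentium 4 model variant; with more than two hyper-threads
 * per package oprofile falls back to timer mode.
 */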
static int __init p4_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;

        if (cpu_model > 6 || cpu_model == 5)
                return 0;

#ifndef CONFIG_SMP
        *cpu_type = "i386/p4";
        model = &op_p4_spec;
        return 1;
#else
        switch (smp_num_siblings) {
        case 1:
                *cpu_type = "i386/p4";
                model = &op_p4_spec;
                return 1;

        case 2:
                *cpu_type = "i386/p4-ht";
                model = &op_p4_ht2_spec;
                return 1;
        }
#endif

        printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
        printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
        return 0;
}

static int force_arch_perfmon;
static int force_cpu_type(const char *str, struct kernel_param *kp)
{
        if (!strcmp(str, "arch_perfmon")) {
                force_arch_perfmon = 1;
                printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
        }

        return 0;
}
module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);

static int __init ppro_init(char **cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;
        struct op_x86_model_spec const *spec = &op_ppro_spec;  /* default */

        if (force_arch_perfmon && cpu_has_arch_perfmon)
                return 0;

        switch (cpu_model) {
        case 0 ... 2:
                *cpu_type = "i386/ppro";
                break;
        case 3 ... 5:
                *cpu_type = "i386/pii";
                break;
        case 6 ... 8:
        case 10 ... 11:
                *cpu_type = "i386/piii";
                break;
        case 9:
        case 13:
                *cpu_type = "i386/p6_mobile";
                break;
        case 14:
                *cpu_type = "i386/core";
                break;
        case 15: case 23:
                *cpu_type = "i386/core_2";
                break;
        case 26:
                spec = &op_arch_perfmon_spec;
                *cpu_type = "i386/core_i7";
                break;
        case 28:
                *cpu_type = "i386/atom";
                break;
        default:
                /* Unknown */
                return 0;
        }

        model = spec;
        return 1;
}

/* in order to get sysfs right */
static int using_nmi;

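/*
 * Entry point: pick a CPU model, register the hotplug notifier and sysfs
 * device, and wire up the oprofile operations to the NMI implementation.
 */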
int __init op_nmi_init(struct oprofile_operations *ops)
{
        __u8 vendor = boot_cpu_data.x86_vendor;
        __u8 family = boot_cpu_data.x86;
        char *cpu_type = NULL;
        int ret = 0;

        if (!cpu_has_apic)
                return -ENODEV;

        switch (vendor) {
        case X86_VENDOR_AMD:
                /* Needs to be at least an Athlon (or hammer in 32bit mode) */

                switch (family) {
                case 6:
                        cpu_type = "i386/athlon";
                        break;
                case 0xf:
                        /*
                         * Actually it could be i386/hammer too, but
                         * give user space a consistent name.
                         */
                        cpu_type = "x86-64/hammer";
                        break;
                case 0x10:
                        cpu_type = "x86-64/family10";
                        break;
                case 0x11:
                        cpu_type = "x86-64/family11h";
                        break;
                default:
                        return -ENODEV;
                }
                model = &op_amd_spec;
                break;

        case X86_VENDOR_INTEL:
                switch (family) {
                /* Pentium IV */
                case 0xf:
                        p4_init(&cpu_type);
                        break;

                /* A P6-class processor */
                case 6:
                        ppro_init(&cpu_type);
                        break;

                default:
                        break;
                }

                if (cpu_type)
                        break;

                if (!cpu_has_arch_perfmon)
                        return -ENODEV;

                /* use arch perfmon as fallback */
                cpu_type = "i386/arch_perfmon";
                model = &op_arch_perfmon_spec;
                break;

        default:
                return -ENODEV;
        }

#ifdef CONFIG_SMP
        register_cpu_notifier(&oprofile_cpu_nb);
#endif
        /* default values, can be overwritten by model */
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
        __raw_get_cpu_var(switch_index) = 0;
#endif
        ops->create_files       = nmi_create_files;
        ops->setup              = nmi_setup;
        ops->shutdown           = nmi_shutdown;
        ops->start              = nmi_start;
        ops->stop               = nmi_stop;
        ops->cpu_type           = cpu_type;
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
        ops->switch_events      = nmi_switch_event;
#endif

        if (model->init)
                ret = model->init(ops);
        if (ret)
                return ret;

        init_sysfs();
        using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
}

void op_nmi_exit(void)
{
        if (using_nmi) {
                exit_sysfs();
#ifdef CONFIG_SMP
                unregister_cpu_notifier(&oprofile_cpu_nb);
#endif
        }
        if (model->exit)
                model->exit();
}