x86: rename the struct pt_regs members for 32/64-bit consistency
arch/x86/kernel/time_64.c
/*
 * "High Precision Event Timer" based timekeeping.
 *
 * Copyright (c) 1991,1992,1995 Linus Torvalds
 * Copyright (c) 1994 Alan Modra
 * Copyright (c) 1995 Markus Kuhn
 * Copyright (c) 1996 Ingo Molnar
 * Copyright (c) 1998 Andrea Arcangeli
 * Copyright (c) 2002,2006 Vojtech Pavlik
 * Copyright (c) 2003 Andi Kleen
 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/time.h>

#include <asm/i8253.h>
#include <asm/hpet.h>
#include <asm/nmi.h>
#include <asm/vgtod.h>

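/*
 * Separate copy of jiffies, placed in its own section via
 * __section_jiffies so the read-only vsyscall page can map it and
 * export it to user space.
 */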
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;

unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or a copy
	   of eflags from PUSHF.
	   Eflags always has bits 22 and up cleared, unlike kernel
	   addresses. */
	if (!user_mode(regs) && in_lock_functions(pc)) {
		unsigned long *sp = (unsigned long *)regs->sp;
		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
{
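	/* Account this tick in the per-CPU PDA interrupt counter. */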
	add_pda(irq0_irqs, 1);

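	/* Dispatch to the registered global clock event device (HPET or PIT). */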
	global_clock_event->event_handler(global_clock_event);

	return IRQ_HANDLED;
}

/*
 * tsc_calibrate_cpu_khz is used on systems with a fixed-rate TSC to
 * determine the processor frequency.
 */
#define TICK_COUNT 100000000
static unsigned int __init tsc_calibrate_cpu_khz(void)
{
	int tsc_start, tsc_now;
	int i, no_ctr_free;
	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
	unsigned long flags;

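	/*
	 * Look for a free K7 performance counter; if none is free,
	 * temporarily borrow counter 3 and restore its previous state
	 * when done.
	 */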
	for (i = 0; i < 4; i++)
		if (avail_to_resrv_perfctr_nmi_bit(i))
			break;
	no_ctr_free = (i == 4);
	if (no_ctr_free) {
		i = 3;
		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		rdmsrl(MSR_K7_PERFCTR3, pmc3);
	} else {
		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	local_irq_save(flags);
	/* start measuring cycles, incrementing from 0 */
	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
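	/*
	 * Program the event select register: event 0x76 counts CPU
	 * clocks not halted on AMD; bits 16-17 enable counting in both
	 * OS and user mode, bit 22 enables the counter.
	 */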
	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
	rdtscl(tsc_start);
	do {
		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
		tsc_now = get_cycles_sync();
	} while ((tsc_now - tsc_start) < TICK_COUNT);

	local_irq_restore(flags);
	if (no_ctr_free) {
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		wrmsrl(MSR_K7_PERFCTR3, pmc3);
		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
	} else {
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}

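	/*
	 * pmc_now core clocks elapsed while (tsc_now - tsc_start) TSC
	 * cycles passed, so scale the known TSC frequency by that ratio
	 * to obtain the core frequency in kHz.
	 */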
	return pmc_now * tsc_khz / (tsc_now - tsc_start);
}

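/* IRQ0 (timer) action: ticks arrive from the PIT or HPET on IRQ 0. */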
static struct irqaction irq0 = {
	.handler	= timer_event_interrupt,
	.flags		= IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
	.mask		= CPU_MASK_NONE,
	.name		= "timer"
};

void __init time_init(void)
{
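	/* Prefer the HPET as the tick source; fall back to the PIT if
	   it is unavailable. */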
	if (!hpet_enable())
		setup_pit_timer();

	setup_irq(0, &irq0);

	tsc_calibrate();

	cpu_khz = tsc_khz;
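	/*
	 * On AMD family 0x10 CPUs the constant-rate TSC need not tick
	 * at the current core clock, so measure the core frequency with
	 * a performance counter instead.
	 */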
	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 == 16)
		cpu_khz = tsc_calibrate_cpu_khz();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

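	/*
	 * Select how the vgetcpu vsyscall reads the CPU number: RDTSCP
	 * returns it in TSC_AUX; otherwise fall back to LSL on a
	 * per-CPU GDT segment limit.
	 */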
	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;

	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
		cpu_khz / 1000, cpu_khz % 1000);
	init_tsc_clocksource();
}