/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996, 1997 Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02  Alan Modra
 *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/param.h>
#include <asm/pdc.h>	/* pdc_tod_read(), struct pdc_tod */
#include <asm/led.h>	/* power_tasklet */

#include <linux/timex.h>
/* xtime and wall_jiffies keep wall-clock time */
extern unsigned long wall_jiffies;

static long clocktick __read_mostly;	/* timer cycles per tick */
static long halftick __read_mostly;	/* clocktick / 2: slack for catching up */
#ifdef CONFIG_SMP
extern void smp_do_timer(struct pt_regs *regs);
#endif
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now;
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	profile_tick(CPU_PROFILING, regs);
	now = mfctl(16);
	/* initialize next_tick to time at last clocktick */
	next_tick = cpu_data[cpu].it_value;
	/* since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If we
	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
	 * but maybe we'll miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */

	nticks = 0;
	while ((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
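	/* Worked example (illustrative numbers, not from this file): with
	 * clocktick = 10000 and halftick = 5000, a stale it_value of 100000
	 * against now = 123000 advances next_tick 110000 -> 120000 -> 130000
	 * and stops once next_tick - now = 7000 >= halftick, so nticks = 3
	 * missed ticks get processed below.
	 */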
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;

	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#else
		update_process_times(user_mode(regs));
#endif
		if (cpu == 0) {
			write_seqlock(&xtime_lock);
			do_timer(regs);
			write_sequnlock(&xtime_lock);
		}
	}
	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);

	return IRQ_HANDLED;
}
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* back up over an instruction that the N (nullify) bit will skip */
	if (regs->gr[0] & PSW_N)
		pc -= 4;

#ifdef CONFIG_SMP
	if (in_lock_functions(pc))
		pc = regs->gr[2];	/* attribute the sample to the caller (rp) */
#endif

	return pc;
}
EXPORT_SYMBOL(profile_pc);
/*** converted from ia64 ***/
/*
 * Return the number of micro-seconds that elapsed since the last
 * update to wall time (aka xtime aka wall_jiffies).  The xtime_lock
 * must be at least read-locked when calling this routine.
 */
static inline unsigned long
gettimeoffset (void)
{
#ifndef CONFIG_SMP
	/*
	 * FIXME: This won't work on smp because jiffies are updated by cpu 0.
	 *    Once parisc-linux learns the cr16 difference between processors,
	 *    this could be made to work.
	 */
	long last_tick;
	long elapsed_cycles;

	/* it_value is the intended time of the next tick */
	last_tick = cpu_data[smp_processor_id()].it_value;
	/* Subtract one tick and account for possible difference between
	 * when we expected the tick and when it actually arrived.
	 */
	last_tick -= clocktick * (jiffies - wall_jiffies + 1);
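	/* E.g. (illustrative): with no pending update (jiffies ==
	 * wall_jiffies) this backs up exactly one clocktick from the
	 * *next* tick's intended time, i.e. to the tick already folded
	 * into xtime; each not-yet-processed jiffy backs up one more.
	 */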
	elapsed_cycles = mfctl(16) - last_tick;
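	/* Conversion sketch (illustrative clock rate, not from this file):
	 * PAGE0->mem_10msec counts CR16 cycles per 10 ms, so
	 * mem_10msec / 10000 is cycles per microsecond.  On a 250 MHz
	 * CR16, mem_10msec = 2500000 and the divisor below is 250.
	 */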
	/* the precision of this math could be improved */
	return elapsed_cycles / (PAGE0->mem_10msec / 10000);
#else
	return 0;	/* see the SMP FIXME above */
#endif
}
void
do_gettimeofday (struct timeval *tv)
{
	unsigned long flags, seq, usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = gettimeoffset();
		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / 1000);
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
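	/* The seqlock loop retries whenever a writer (timer tick or
	 * settimeofday) touched xtime mid-read, so sec and usec always
	 * come from one consistent snapshot.
	 */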
	if (unlikely(usec > LONG_MAX)) {
		/* This can happen if the gettimeoffset adjustment is
		 * negative and xtime.tv_nsec is smaller than the
		 * adjustment */
		printk(KERN_ERR "do_gettimeofday() spurious xtime.tv_nsec of %ld\n", usec);
		usec += USEC_PER_SEC;
		--sec;
		/* This should never happen, it means the negative
		 * time adjustment was more than a second, so there's
		 * something seriously wrong */
		BUG_ON(usec > LONG_MAX);
	}
	while (usec >= USEC_PER_SEC) {
		usec -= USEC_PER_SEC;
		++sec;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);
int
do_settimeofday (struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;
	write_seqlock_irq(&xtime_lock);
	{
		/*
		 * This is revolting. We need to set "xtime"
		 * correctly. However, the value in this location is
		 * the value at the most recent update of wall time.
		 * Discover what correction gettimeofday would have
		 * done, and then undo it!
		 */
		nsec -= gettimeoffset() * 1000;
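		/* Example (illustrative values): if the caller sets
		 * 12:00:00.000 and gettimeoffset() reports 3000 usec since
		 * the last xtime update, we store 11:59:59.997000000 so a
		 * later gettimeofday(), adding the same offset back, reads
		 * 12:00:00.000 as requested.
		 */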
		wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

		set_normalized_timespec(&xtime, sec, nsec);
		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

		ntp_clear();	/* assumed here: reset NTP state, as kernels of this era do */
	}
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
/*
 * XXX: We can do better than this.
 * Returns nanoseconds
 */
unsigned long long sched_clock(void)
{
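	/* Granularity note (illustrative HZ): with HZ = 100 each jiffy is
	 * 1000000000 / 100 = 10000000 ns, so this clock only advances in
	 * 10 ms steps -- hence the XXX above.
	 */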
	return (unsigned long long)jiffies * (1000000000 / HZ);
}
void __init time_init(void)
{
	unsigned long next_tick;
	static struct pdc_tod tod_data;

	clocktick = (100 * PAGE0->mem_10msec) / HZ;
	halftick = clocktick / 2;
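	/* Example (illustrative rate): 100 * mem_10msec is CR16 cycles per
	 * second; on a 250 MHz CR16 with HZ = 100 this gives clocktick =
	 * 250000000 / 100 = 2500000 cycles between ticks, halftick = 1250000.
	 */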
	/* Setup clock interrupt timing */

	next_tick = mfctl(16);
	next_tick += clocktick;
	cpu_data[smp_processor_id()].it_value = next_tick;

	/* kick off Itimer (CR16) */
	mtctl(next_tick, 16);
	if (pdc_tod_read(&tod_data) == 0) {
		write_seqlock_irq(&xtime_lock);
		xtime.tv_sec = tod_data.tod_sec;
		xtime.tv_nsec = tod_data.tod_usec * 1000;
		set_normalized_timespec(&wall_to_monotonic,
		                        -xtime.tv_sec, -xtime.tv_nsec);
		write_sequnlock_irq(&xtime_lock);
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		xtime.tv_sec = 0;
		xtime.tv_nsec = 0;
	}
}