/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock).
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>

#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif
/* keep track of when we need to update the rtc */
time_t last_rtc_update;
#ifdef CONFIG_PPC_ISERIES
unsigned long iSeries_recal_titan = 0;
unsigned long iSeries_recal_tb = 0;
static unsigned long first_settimeofday = 1;
#endif
/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601	(1000000000 / HZ)

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
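
/*
 * Illustrative note (not from the original source): an xsec is 1/2^20
 * of a second, so XSEC_PER_SEC == 1048576.  For example, half a second
 * is 524288 xsec, and SCALE_XSEC(524288, 1000000) yields 500000 usec;
 * because XSEC_PER_SEC is a power of two, the division reduces to a
 * shift, so no real divide instruction is ever needed.
 */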
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;	/* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;
#define TICKLEN_SCALE	(SHIFT_SCALE - 10)
u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

u64 tb_to_ns_scale;
unsigned tb_to_ns_shift;

struct gettimeofday_struct do_gtod;

extern unsigned long wall_jiffies;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;

u64 tb_last_jiffy __cacheline_aligned_in_smp;
unsigned long tb_last_stamp;

/*
 * Note that on ppc32 this only stores the bottom 32 bits of
 * the timebase value, but that's enough to tell when a jiffy
 * has passed.
 */
DEFINE_PER_CPU(unsigned long, last_jiffy);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
	__cputime_msec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
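
/*
 * Illustrative example (hypothetical numbers, not from the original
 * source): each factor is (numerator * 2^64) / tb_ticks_per_sec, a
 * 0.64 fixed-point fraction meant to be applied with mulhdu().  With
 * tb_ticks_per_sec = 512000000, __cputime_msec_factor is about
 * 0.00000195 * 2^64, so mulhdu(512000000, __cputime_msec_factor)
 * gives 1000 msec for one second's worth of timebase ticks.
 */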
/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return mftb();
}
/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, delta;
	unsigned long flags;

	local_irq_save(flags);
	now = read_purr();
	delta = now - get_paca()->startpurr;
	get_paca()->startpurr = now;
	if (!in_interrupt()) {
		delta += get_paca()->system_time;
		get_paca()->system_time = 0;
	}
	account_system_time(tsk, 0, delta);
	local_irq_restore(flags);
}
/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
	cputime_t utime;

	utime = get_paca()->user_time;
	get_paca()->user_time = 0;
	account_user_time(tsk, utime);
}
static void account_process_time(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	account_process_vtime(current);
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_mode(regs));
	run_posix_cpu_timers(current);
}
#ifdef CONFIG_PPC_SPLPAR
/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
	int	initialized;		/* thread is running */
	u64	tb0;			/* timebase at origin time */
	u64	purr0;			/* PURR at origin time */
	u64	tb;			/* last TB value read */
	u64	purr;			/* last PURR value read */
	u64	stolen;			/* stolen time so far */
	spinlock_t lock;
};

static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
static void snapshot_tb_and_purr(void *data)
{
	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

	p->tb0 = p->tb = mftb();
	p->purr0 = mfspr(SPRN_PURR);
	p->purr = 0;
	p->initialized = 1;
}
/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}
void calculate_steal_time(void)
{
	u64 tb, purr, t0;
	s64 stolen;
	struct cpu_purr_data *p0, *pme, *phim;
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	cpu = smp_processor_id();
	pme = &per_cpu(cpu_purr_data, cpu);
	if (!pme->initialized)
		return;		/* this can happen in early boot */
	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
	spin_lock(&p0->lock);
	tb = mftb();
	purr = mfspr(SPRN_PURR) - pme->purr0;
	if (!phim->initialized || !cpu_online(cpu ^ 1)) {
		stolen = (tb - pme->tb) - (purr - pme->purr);
	} else {
		t0 = pme->tb0;
		if (phim->tb0 < t0)
			t0 = phim->tb0;
		stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
	}
	if (stolen > 0) {
		account_steal_time(current, stolen);
		p0->stolen += stolen;
	}
	pme->tb = tb;
	pme->purr = purr;
	spin_unlock(&p0->lock);
}
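
/*
 * Illustrative example (not from the original source): if, since the
 * last snapshot, this thread saw the timebase advance by 1000 ticks
 * while its PURR advanced by only 600, then 400 timebase ticks went
 * to the sibling thread or to the hypervisor.  With the sibling
 * offline, the first branch above accounts the whole difference
 * (tb delta - purr delta) as stolen time.
 */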
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
	u64 purr;
	struct cpu_purr_data *p0, *pme, *phim;
	unsigned long flags;
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	cpu = smp_processor_id();
	pme = &per_cpu(cpu_purr_data, cpu);
	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
	spin_lock_irqsave(&p0->lock, flags);
	pme->tb = pme->tb0 = mftb();
	purr = mfspr(SPRN_PURR);
	if (!phim->initialized) {
		pme->purr = 0;
		pme->purr0 = purr;
	} else {
		/* set p->purr and p->purr0 for no change in p0->stolen */
		pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
		pme->purr0 = purr - pme->purr;
	}
	pme->initialized = 1;
	spin_unlock_irqrestore(&p0->lock, flags);
}
#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs)	update_process_times(user_mode(regs))
#define calculate_steal_time()		do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()			do { } while (0)
#endif
/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
	__get_cpu_var(last_jiffy) = get_tb();
	snapshot_purr();
}
void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			/* nothing */;
	}
}
EXPORT_SYMBOL(__delay);
void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
static __inline__ void timer_check_rtc(void)
{
	/*
	 * update the rtc when needed, this should be performed on the
	 * right fraction of a second. Half or full second ?
	 * Full second works on mk48t59 clocks, others need testing.
	 * Note that this update is basically only used through
	 * the adjtimex system calls. Setting the HW clock in
	 * any other way is a /dev/rtc and userland business.
	 * This is still wrong by -0.5/+1.5 jiffies because of the
	 * timer interrupt resolution and possible delay, but here we
	 * hit a quantization limit which can only be solved by higher
	 * resolution timers and decoupling time management from timer
	 * interrupts. This is also wrong on the clocks
	 * which require being written at the half second boundary.
	 * We should have an rtc call that only sets the minutes and
	 * seconds like on Intel to avoid problems with non UTC clocks.
	 */
	if (ppc_md.set_rtc_time && ntp_synced() &&
	    xtime.tv_sec - last_rtc_update >= 659 &&
	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
		struct rtc_time tm;

		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
		tm.tm_year -= 1900;
		tm.tm_mon -= 1;
		if (ppc_md.set_rtc_time(&tm) == 0)
			last_rtc_update = xtime.tv_sec + 1;
		else
			/* Try again one minute later */
			last_rtc_update += 60;
	}
}
/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
{
	unsigned long sec, usec;
	u64 tb_ticks, xsec;
	struct gettimeofday_vars *temp_varp;
	u64 temp_tb_to_xs, temp_stamp_xsec;

	/*
	 * These calculations are faster (gets rid of divides)
	 * if done in units of 1/2^20 rather than microseconds.
	 * The conversion to microseconds at the end is done
	 * without a divide (and in fact, without a multiply)
	 */
	temp_varp = do_gtod.varp;
	tb_ticks = tb_val - temp_varp->tb_orig_stamp;
	temp_tb_to_xs = temp_varp->tb_to_xs;
	temp_stamp_xsec = temp_varp->stamp_xsec;
	xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
	sec = xsec / XSEC_PER_SEC;
	usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
	usec = SCALE_XSEC(usec, 1000000);

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
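
/*
 * Worked example (hypothetical numbers, not from the original source):
 * with a 512000000 ticks/sec timebase, tb_to_xs is the 0.64 fraction
 * XSEC_PER_SEC / tb_ticks_per_sec, so 256000000 elapsed ticks give
 * mulhdu(256000000, tb_to_xs) == 524288 xsec, i.e. 0.5 seconds.
 * The mask and SCALE_XSEC steps above then split that into sec == 0
 * and usec == 500000 without any expensive division.
 */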
void do_gettimeofday(struct timeval *tv)
{
	if (__USE_RTC()) {
		/* do this the old way */
		unsigned long flags, seq;
		unsigned int sec, nsec, usec;

		do {
			seq = read_seqbegin_irqsave(&xtime_lock, flags);
			sec = xtime.tv_sec;
			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
		usec = nsec / 1000;
		while (usec >= 1000000) {
			usec -= 1000000;
			++sec;
		}
		tv->tv_sec = sec;
		tv->tv_usec = usec;
		return;
	}
	__do_gettimeofday(tv, get_tb());
}
EXPORT_SYMBOL(do_gettimeofday);
/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	unsigned temp_idx;
	struct gettimeofday_vars *temp_varp;

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criteria is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = new_tb_stamp;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}
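
/*
 * Sketch of the lock-free read side described above (illustrative,
 * not part of this file; the real reader lives in the vdso and the
 * variable names below are invented for the example):
 *
 *	do {
 *		count = vdso_data->tb_update_count;
 *		smp_rmb();
 *		stamp = vdso_data->tb_orig_stamp;
 *		xs = vdso_data->tb_to_xs;
 *		smp_rmb();
 *	} while ((count & 1) || count != vdso_data->tb_update_count);
 */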
/*
 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
 * between tb_orig_stamp and stamp_xsec.  The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32 bits number.  This is a requirement of our fast 32 bits userland
 * implementation in the vdso.  If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
 * with a too big difference, then the vdso will fall back to calling
 * the syscall.
 */
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
	unsigned long offset;
	u64 new_stamp_xsec;
	u64 tlen, t2x;
	u64 tb, xsec_old, xsec_new;
	struct gettimeofday_vars *varp;

	if (__USE_RTC())
		return;
	tlen = current_tick_length();
	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
	if (tlen == last_tick_len && offset < 0x80000000u)
		return;
	if (tlen != last_tick_len) {
		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
		last_tick_len = tlen;
	} else
		t2x = do_gtod.varp->tb_to_xs;
	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;

	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Make sure time doesn't go backwards for userspace gettimeofday.
	 */
	tb = get_tb();
	varp = do_gtod.varp;
	xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
		+ varp->stamp_xsec;
	xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
	if (xsec_new < xsec_old)
		new_stamp_xsec += xsec_old - xsec_new;

	update_gtod(cur_tb, new_stamp_xsec, t2x);
}
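
/*
 * Illustrative arithmetic (hypothetical numbers, not from the original
 * source): at a 512MHz timebase, 2^31 ticks is about 4.2 seconds, so
 * as long as timer_recalc_offset runs every jiffy the tb - tb_orig_stamp
 * difference stays comfortably inside 32 bits and the fast vdso path
 * keeps being used.
 */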
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */
static void iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;

	tb = get_tb();
	titan = HvCallXm_loadTod();
	if ( iSeries_recal_titan ) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';

		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if ( tick_diff < 0 ) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if ( tick_diff ) {
			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
					new_tb_ticks_per_jiffy, sign, tick_diff );
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec   = new_tb_ticks_per_sec;
				calc_cputime_factors();
				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
				tb_to_xs = divres.result_low;
				do_gtod.varp->tb_to_xs = tb_to_xs;
				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
				vdso_data->tb_to_xs = tb_to_xs;
			}
			else {
				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
					"                   new tb_ticks_per_jiffy = %lu\n"
					"                   old tb_ticks_per_jiffy = %lu\n",
					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
			}
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;
}
#endif
/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	int next_dec;
	int cpu = smp_processor_id();
	unsigned long ticks;

#ifdef CONFIG_PPC32
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	irq_enter();

	profile_tick(CPU_PROFILING, regs);
	calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
	get_lppaca()->int_dword.fields.decr_int = 0;
#endif

	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
	       >= tb_ticks_per_jiffy) {
		/* Update last_jiffy */
		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
		/* Handle RTCL overflow on 601 */
		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
			per_cpu(last_jiffy, cpu) -= 1000000000;

		/*
		 * We cannot disable the decrementer, so in the period
		 * between this cpu's being marked offline in cpu_online_map
		 * and calling stop-self, it is taking timer interrupts.
		 * Avoid calling into the scheduler rebalancing code if this
		 * is the case.
		 */
		if (!cpu_is_offline(cpu))
			account_process_time(regs);

		/*
		 * No need to check whether cpu is offline here; boot_cpuid
		 * should have been fixed up by now.
		 */
		if (cpu != boot_cpuid)
			continue;

		write_seqlock(&xtime_lock);
		tb_last_jiffy += tb_ticks_per_jiffy;
		tb_last_stamp = per_cpu(last_jiffy, cpu);
		do_timer(regs);
		timer_recalc_offset(tb_last_jiffy);
		timer_check_rtc();
		write_sequnlock(&xtime_lock);
	}

	next_dec = tb_ticks_per_jiffy - ticks;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	if (hvlpevent_is_pending())
		process_hvlpevents(regs);
#endif

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
}
void wakeup_decrementer(void)
{
	unsigned long ticks;

	/*
	 * The timebase gets saved on sleep and restored on wakeup,
	 * so all we need to do is to reset the decrementer.
	 */
	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
	if (ticks < tb_ticks_per_jiffy)
		ticks = tb_ticks_per_jiffy - ticks;
	else
		ticks = 1;
	set_dec(ticks);
}
#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	unsigned long half = tb_ticks_per_jiffy / 2;
	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
	unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);

	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
	previous_tb -= tb_ticks_per_jiffy;
	/*
	 * The stolen time calculation for POWER5 shared-processor LPAR
	 * systems works better if the two threads' timebase interrupts
	 * are staggered by half a jiffy with respect to each other.
	 */
	for_each_possible_cpu(i) {
		if (i == boot_cpuid)
			continue;
		if (i == (boot_cpuid ^ 1))
			per_cpu(last_jiffy, i) =
				per_cpu(last_jiffy, boot_cpuid) - half;
		else if (i & 1)
			per_cpu(last_jiffy, i) =
				per_cpu(last_jiffy, i ^ 1) + half;
		else {
			previous_tb += offset;
			per_cpu(last_jiffy, i) = previous_tb;
		}
	}
}
#endif
/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
}
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	u64 new_xsec;
	unsigned long tb_delta;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/*
	 * Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
	 * is cleared.  Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */
#ifdef CONFIG_PPC_ISERIES
	if (first_settimeofday) {
		iSeries_tb_recal();
		first_settimeofday = 0;
	}
#endif

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Subtract off the number of nanoseconds since the
	 * beginning of the last tick.
	 * Note that since we don't increment jiffies_64 anywhere other
	 * than in do_timer (since we don't have a lost tick problem),
	 * wall_jiffies will always be the same as jiffies,
	 * and therefore the (jiffies - wall_jiffies) computation
	 * has been removed.
	 */
	tb_delta = tb_ticks_since(tb_last_stamp);
	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	ntp_clear();

	new_xsec = xtime.tv_nsec;
	if (new_xsec != 0) {
		new_xsec *= XSEC_PER_SEC;
		do_div(new_xsec, NSEC_PER_SEC);
	}
	new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;

	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = (unsigned int *)get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = 0;
			while (cells--)
				*val = (*val << 32) | *fp++;
		}

		of_node_put(cpu);
	}

	return found;
}
void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}

#ifdef CONFIG_BOOKE
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
}
unsigned long get_boot_time(void)
{
	struct rtc_time tm;

	if (ppc_md.get_boot_time)
		return ppc_md.get_boot_time();
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	unsigned long tm = 0;
	struct div_result res;
	u64 scale, x;
	unsigned shift;

	if (ppc_md.time_init != NULL)
		timezone_offset = ppc_md.time_init();

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_stamp = get_rtcl();
		tb_last_jiffy = tb_last_stamp;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_stamp = tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	calc_cputime_factors();
	/*
	 * Calculate the length of each tick in ns.  It will not be
	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
	 * rounded up.
	 */
	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
	do_div(x, ppc_tb_freq);
	tick_nsec = x;
	last_tick_len = x << TICKLEN_SCALE;
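
	/*
	 * Worked example (hypothetical numbers, not from the original
	 * source): with ppc_tb_freq = 512000000 and HZ = 250,
	 * tb_ticks_per_jiffy = 2048000 and x = 1e9 * 2048000 / 512000000
	 * = 4000000 ns exactly.  When ppc_tb_freq is not divisible by HZ,
	 * the rounding up above errs on the side of a slightly long tick.
	 */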
	/*
	 * Compute ticklen_to_xs, which is a factor which gets multiplied
	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
	 * It is computed as:
	 *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 * which turns out to be N = 51 - SHIFT_HZ.
	 * This gives the result as a 0.64 fixed-point fraction.
	 * That value is reduced by an offset amounting to 1 xsec per
	 * 2^31 timebase ticks to avoid problems with time going backwards
	 * by 1 xsec when we do timer_recalc_offset due to losing the
	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
	 * since there are 2^20 xsec in a second.
	 */
	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
	ticklen_to_xs = res.result_low;

	/* Compute tb_to_xs from tick_nsec */
	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
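
	/*
	 * Worked example (hypothetical numbers, not from the original
	 * source): for tb_ticks_per_sec = 512000000, 1e9 / 512000000 is
	 * about 1.95 ns per tick, so the 64.64 quotient has a nonzero
	 * integer part; one pass of the loop above halves the scale to
	 * ~0.977 as a 0.64 fraction and leaves tb_to_ns_shift = 1, and
	 * sched_clock() computes mulhdu(tb, scale) << 1.
	 */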
	tm = get_boot_time();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
		tm -= timezone_offset;
	}

	xtime.tv_sec = tm;
	xtime.tv_nsec = 0;
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_stamp;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;

	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	last_rtc_update = xtime.tv_sec;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}
#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)		(leapyear(a) ? 366 : 365)
#define days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}
void to_tm(int tim, struct rtc_time * tm)
{
	register int	i;
	register long	hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}
/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency giving resolution of a few tens of nanoseconds is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured but does not harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
	unsigned mlt=0, tmp, err;
	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */
	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt|tmp) < outscale)
			mlt |= tmp;
	}

	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */
	err = inscale * (mlt+1);
	if (err <= inscale/2)
		mlt++;
	return mlt;
}
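
/*
 * Illustrative example (hypothetical numbers, not from the original
 * source): the loop above binary-searches for mlt ~= outscale * 2^32
 * / inscale.  For inscale = 512000000 ticks/sec and outscale = 1000000
 * usec/sec it returns 8388608 (2^32 / 512), so mulhwu(ticks, mlt)
 * converts timebase ticks straight to microseconds.
 */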
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low = ((u64)y << 32) + z;
}
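
/*
 * Usage note (illustrative, not from the original source): callers in
 * this file use the 128-bit dividend to build fixed-point fractions;
 * e.g. time_init() calls div128_by_32(1000000000, 0, tb_ticks_per_sec,
 * &res) to compute (1e9 * 2^64) / tb_ticks_per_sec as a 64.64 number
 * whose fractional part becomes the sched_clock scale factor.
 */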