/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"
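/*
 * Flags used by timekeeping_update(): TK_CLEAR_NTP resets the NTP state,
 * TK_MIRROR copies the updated timekeeper into shadow_timekeeper, and
 * TK_CLOCK_WAS_SET tells listeners that the clock was stepped.
 */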
#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)
static struct timekeeper timekeeper;
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static seqcount_t timekeeper_seq;
static struct timekeeper shadow_timekeeper;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* Flag for if there is a persistent clock on this platform */
bool __read_mostly persistent_clock_exist = false;
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
		tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
		tk->xtime_sec++;
	}
}
static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}
static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
	tk_normalize_xtime(tk);
}
static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
{
	struct timespec tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}
static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
{
	/* Verify consistency before modifying */
	WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);

	tk->total_sleep_time	= t;
	tk->offs_boot		= timespec_to_ktime(t);
}
/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = tk->clock;
	tk->clock = clock;
	tk->cycle_last = clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->xtime_nsec >>= -shift_change;
		else
			tk->xtime_nsec <<= shift_change;
	}
	tk->shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These value will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->mult = clock->mult;
}
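/*
 * Illustration of the interval math above (made-up numbers, assuming
 * HZ == 100 so NTP_INTERVAL_LENGTH is 10^7 ns): a 1 MHz clocksource with
 * mult == 1000 and shift == 0 gives tmp = (10^7 + 500) / 1000 == 10000,
 * so cycle_interval is 10000 cycles per tick and xtime_interval is 10^7
 * shifted ns; any rounding loss is tracked in xtime_remainder.
 */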
/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
u32 (*arch_gettimeoffset)(void);

u32 get_arch_timeoffset(void)
{
	if (likely(arch_gettimeoffset))
		return arch_gettimeoffset();

	return 0;
}
#else
static inline u32 get_arch_timeoffset(void) { return 0; }
#endif
static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	nsec = cycle_delta * tk->mult + tk->xtime_nsec;
	nsec >>= tk->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + get_arch_timeoffset();
}
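/*
 * Note: xtime_nsec and the intermediate value above are kept in "shifted"
 * nanoseconds (ns << tk->shift) so sub-nanosecond remainders survive NTP
 * adjustments; the value is only shifted down when reported to callers.
 */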
static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert delta to nanoseconds. */
	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + get_arch_timeoffset();
}
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}
/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
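/*
 * Illustrative (hypothetical) listener: a paravirt clock driver would embed
 * a struct notifier_block, point .notifier_call at its update handler and
 * pass it to pvclock_gtod_register_notifier(); the handler is then invoked
 * with the timekeeper pointer on every timekeeping_update().
 */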
/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}
	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
}
/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = tk->clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	tk->cycle_last = clock->cycle_last = cycle_now;

	tk->xtime_nsec += cycle_delta * tk->mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&tk->raw_time, nsec);
}
/**
 * __getnstimeofday - Returns the time of day in a timespec.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs = 0;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
EXPORT_SYMBOL(__getnstimeofday);
/**
 * getnstimeofday - Returns the time of day in a timespec.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec (WARN if suspended).
 */
void getnstimeofday(struct timespec *ts)
{
	WARN_ON(__getnstimeofday(ts));
}
EXPORT_SYMBOL(getnstimeofday);
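/*
 * Illustrative use only (not part of this file): a driver wanting a
 * wall-clock timestamp would typically do
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	pr_info("event at %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 */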
ktime_t ktime_get(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
		nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;

	} while (read_seqcount_retry(&timekeeper_seq, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
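/*
 * Note on the read pattern used above and in the other accessors: readers
 * spin on timekeeper_seq and retry if a writer (holding timekeeper_lock)
 * updated the timekeeper concurrently, so the read side takes no lock.
 */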
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec tomono;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(tk);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
/**
 * timekeeping_clocktai - Returns the TAI time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void timekeeping_clocktai(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		ts->tv_sec = tk->xtime_sec + tk->tai_offset;
		nsecs = timekeeping_get_ns(tk);

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(timekeeping_clocktai);
/**
 * ktime_get_clocktai - Returns the TAI time of day in a ktime
 *
 * Returns the time of day in a ktime.
 */
ktime_t ktime_get_clocktai(void)
{
	struct timespec ts;

	timekeeping_clocktai(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL(ktime_get_clocktai);
#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		*ts_raw = tk->raw_time;
		ts_real->tv_sec = tk->xtime_sec;
		ts_real->tv_nsec = 0;

		nsecs_raw = timekeeping_get_ns_raw(tk);
		nsecs_real = timekeeping_get_ns(tk);

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers.
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec ts_delta, xt;
	unsigned long flags;

	if (!timespec_valid_strict(tv))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, tv);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
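/*
 * Illustrative caller (hypothetical): stepping the clock forward by one
 * second from kernel code would look like
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	ts.tv_sec += 1;
 *	do_settimeofday(&ts);
 *
 * The settimeofday(2) syscall path ends up here after capability and
 * range checks.
 */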
/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec tmp;
	int ret = 0;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec_add(tk_xtime(tk), *ts);
	if (!timespec_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, ts);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);
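/*
 * The main in-tree user is do_adjtimex() below: an ADJ_SETOFFSET request
 * from adjtimex(2)/clock_adjtime(2) is converted into a timespec delta and
 * applied through timekeeping_inject_offset().
 */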
/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 *
 */
s32 timekeeping_get_tai_offset(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned int seq;
	s32 ret;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		ret = tk->tai_offset;
	} while (read_seqcount_retry(&timekeeper_seq, seq));

	return ret;
}
/**
 * __timekeeping_set_tai_offset - Lock free worker function
 *
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}
/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 *
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);
	__timekeeping_set_tai_offset(tk, tai_offset);
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	clock_was_set();
}
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	timekeeping_forward_now(tk);
	/*
	 * If the cs is in module, get a module reference. Succeeds
	 * for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}
/**
 * timekeeping_notify - Install a new clock source
 * @clock:	pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &timekeeper;

	if (tk->clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->clock == clock ? 0 : -1;
}
/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		nsecs = timekeeping_get_ns_raw(tk);
		*ts = tk->raw_time;

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	return ret;
}
/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		ret = tk->clock->max_idle_ns;

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	return ret;
}
/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot, tmp;

	read_persistent_clock(&now);

	if (!timespec_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	} else if (now.tv_sec || now.tv_nsec)
		persistent_clock_exist = true;

	read_boot_clock(&boot);
	if (!timespec_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);
	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	tmp.tv_sec = 0;
	tmp.tv_nsec = 0;
	tk_set_sleep_time(tk, tmp);

	memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;
/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
							struct timespec *delta)
{
	if (!timespec_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
	tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
	tk_debug_account_sleep_time(delta);
}
/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;

	/*
	 * Make sure we don't set the clock twice, as timekeeping_resume()
	 * already did it
	 */
	if (has_persistent_clock())
		return;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *clock = tk->clock;
	unsigned long flags;
	struct timespec ts_new, ts_delta;
	cycle_t cycle_now, cycle_delta;
	bool suspendtime_found = false;

	read_persistent_clock(&ts_new);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	/*
	 * After system resumes, we need to calculate the suspended time and
	 * compensate it for the OS time. There are 3 sources that could be
	 * used: Nonstop clocksource during suspend, persistent clock and rtc
	 * device.
	 *
	 * One specific platform may have 1 or 2 or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * The less preferred source will only be tried if there is no better
	 * usable source. The rtc part is handled separately in rtc core code.
	 */
	cycle_now = clock->read(clock);
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
		cycle_now > clock->cycle_last) {
		u64 num, max = ULLONG_MAX;
		u32 mult = clock->mult;
		u32 shift = clock->shift;
		s64 nsec = 0;

		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/*
		 * "cycle_delta * mult" may cause a 64-bit overflow if the
		 * suspended time is too long. In that case we need to do
		 * the 64-bit math carefully.
		 */
		do_div(max, mult);
		if (cycle_delta > max) {
			num = div64_u64(cycle_delta, max);
			nsec = (((u64) max * mult) >> shift) * num;
			cycle_delta -= num * max;
		}
		nsec += ((u64) cycle_delta * mult) >> shift;

		ts_delta = ns_to_timespec(nsec);
		suspendtime_found = true;
	} else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec_sub(ts_new, timekeeping_suspend_time);
		suspendtime_found = true;
	}

	if (suspendtime_found)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
	tk->cycle_last = clock->cycle_last = cycle_now;
	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}
static int timekeeping_suspend(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec		delta, delta_delta;
	static struct timespec	old_delta;

	read_persistent_clock(&timekeeping_suspend_time);

	/*
	 * On some systems the persistent_clock can not be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exists flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exist = true;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
	delta_delta = timespec_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise try to adjust old_system to compensate */
		timekeeping_suspend_time =
			timespec_add(timekeeping_suspend_time, delta_delta);
	}

	timekeeping_update(tk, TK_MIRROR);
	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}
device_initcall(timekeeping_init_ops);
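/*
 * Note: syscore ops run late in suspend, on the last running CPU with
 * interrupts disabled, which is why the suspend/resume hooks above may
 * take timekeeper_lock and poke the clocksource directly.
 */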
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
						 s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
	tick_error -= tk->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	s64 error, interval = tk->cycle_interval;
	int adj;

	/*
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
	 * larger than half an interval.
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
	if (error > interval) {
		/*
		 * We now divide error by 4 (via shift), which checks if
		 * the error is greater than twice the interval.
		 * If it is greater, we need a bigadjust, if it's smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
	} else if (error < -interval) {
		/* See comment above, this is just switched for the negative */
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else {
			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
		}
	} else {
		goto out_adjust;
	}

	if (unlikely(tk->clock->maxadj &&
		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
		printk_deferred_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->clock->name, (long)tk->mult + adj,
			(long)tk->clock->mult + tk->clock->maxadj);
	}
	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, let's assume adj == 1 for now.
	 *
	 * When adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * Which can be expanded to:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know adj_2 = adj_1 + 1, so, substituting in:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
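	/*
	 * Worked example (made-up numbers): if xtime_nsec is 5000 shifted ns,
	 * offset is 1000 un-accumulated cycles and adj == 1, then "now" is
	 * 1000 * mult + 5000 before the change; bumping mult by one would add
	 * an extra 1000, so subtracting 1000 from xtime_nsec keeps the
	 * reported time unchanged while future deltas accumulate at the
	 * corrected rate.
	 */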
	tk->mult += adj;
	tk->xtime_interval += interval;
	tk->xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;

out_adjust:
	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small. Further, if we're slightly speeding the clocksource
	 * in the code above, it's possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, we cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->xtime_nsec < 0)) {
		s64 neg = -(s64)tk->xtime_nsec;
		tk->xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}
}
/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
	unsigned int clock_set = 0;

	while (tk->xtime_nsec >= nsecps) {
		int leap;

		tk->xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if it's a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec_sub(tk->wall_to_monotonic, ts));

			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

			clock_set = TK_CLOCK_WAS_SET;
		}
	}
	return clock_set;
}
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
						u32 shift,
						unsigned int *clock_set)
{
	cycle_t interval = tk->cycle_interval << shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->cycle_last += interval;

	tk->xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += ntp_tick_length() << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * it up and add the remainder to the error difference.
	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
	 * by truncating the remainder in vsyscalls. However, it causes
	 * additional work to be done in timekeeping_adjust(). Once
	 * the vsyscall implementations are converted to use xtime_nsec
	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	 * users are removed, this can be killed.
	 */
	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
	tk->xtime_nsec -= remainder;
	tk->xtime_nsec += 1ULL << tk->shift;
	tk->ntp_error += remainder << tk->ntp_error_shift;
	tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
void update_wall_time(void)
{
	struct clocksource *clock;
	struct timekeeper *real_tk = &timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

	clock = real_tk->clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval)
		goto out;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
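	/*
	 * For example (illustrative only): with roughly 37 tick intervals
	 * pending, shift starts out near ilog2(37) == 5, so the loop below
	 * consumes one chunk of 32 intervals, then one of 4, then one of 1,
	 * instead of iterating 37 times.
	 */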
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&timekeeper_seq);
	/* Update clock->cycle_last with the new value */
	clock->cycle_last = tk->cycle_last;
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
	 * memcpy under the timekeeper_seq against one before we start
	 * updating.
	 */
	memcpy(real_tk, tk, sizeof(*tk));
	timekeeping_update(real_tk, clock_set);
	write_seqcount_end(&timekeeper_seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call _delayed version, since in irq context*/
		clock_was_set_delayed();
}
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec boottime = {
		.tv_sec = tk->wall_to_monotonic.tv_sec +
				tk->total_sleep_time.tv_sec,
		.tv_nsec = tk->wall_to_monotonic.tv_nsec +
				tk->total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);
/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:		pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec tomono, sleep;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(tk);
		tomono = tk->wall_to_monotonic;
		sleep = tk->total_sleep_time;

	} while (read_seqcount_retry(&timekeeper_seq, seq));

	ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);
/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
	struct timespec ts;

	get_monotonic_boottime(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;

	*ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
	struct timekeeper *tk = &timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &timekeeper;

	return tk_xtime(tk);
}
struct timespec current_kernel_time(void)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		now = tk_xtime(tk);
	} while (read_seqcount_retry(&timekeeper_seq, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);
struct timespec get_monotonic_coarse(void)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&timekeeper_seq, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}
/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}
/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
				struct timespec *wtom, struct timespec *sleep)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		*xtim = tk_xtime(tk);
		*wtom = tk->wall_to_monotonic;
		*sleep = tk->total_sleep_time;
	} while (read_seqcount_retry(&timekeeper_seq, seq));
}
#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
							ktime_t *offs_tai)
{
	struct timekeeper *tk = &timekeeper;
	ktime_t now;
	unsigned int seq;
	u64 secs, nsecs;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);

		secs = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);

		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
		*offs_tai = tk->offs_tai;
	} while (read_seqcount_retry(&timekeeper_seq, seq));

	now = ktime_add_ns(ktime_set(secs, 0), nsecs);
	now = ktime_sub(now, *offs_real);
	return now;
}
#endif
/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	struct timespec wtom;

	do {
		seq = read_seqcount_begin(&timekeeper_seq);
		wtom = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&timekeeper_seq, seq));

	return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = ntp_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

	getnstimeofday(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}
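/*
 * Illustrative userspace counterpart (hypothetical snippet, not kernel
 * code): injecting a +0.5s step through the path above would look like
 *
 *	struct timex tx = { 0 };
 *
 *	tx.modes = ADJ_SETOFFSET | ADJ_NANO;
 *	tx.time.tv_sec = 0;
 *	tx.time.tv_usec = 500000000;	(nanoseconds, because of ADJ_NANO)
 *	adjtimex(&tx);
 */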
#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&timekeeper_seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&timekeeper_seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif
/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
}