/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>

/*
 * This seqlock protects us from races in SMP while
 * updating xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);


/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the monotonic
 * time not to jump. We need to add total_sleep_time to wall_to_monotonic
 * to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time;		/* seconds */

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
        xtime_cache = xtime;
        timespec_add_ns(&xtime_cache, nsec);
}

struct clocksource *clock;


#ifdef CONFIG_GENERIC_TIME
/**
 * clocksource_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void clocksource_forward_now(void)
{
        cycle_t cycle_now, cycle_delta;
        s64 nsec;

        cycle_now = clocksource_read(clock);
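        /*
         * The masked subtraction handles hardware counters narrower than
         * 64 bits: the delta wraps correctly modulo the counter width.
         */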
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
        clock->cycle_last = cycle_now;

        nsec = cyc2ns(clock, cycle_delta);
        timespec_add_ns(&xtime, nsec);

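        /*
         * Advance the raw monotonic clock with the unadjusted multiplier
         * (mult_orig): NTP frequency corrections apply only to xtime,
         * never to the raw clock.
         */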
        nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
        clock->raw_time.tv_nsec += nsec;
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
        cycle_t cycle_now, cycle_delta;
        unsigned long seq;
        s64 nsecs;

        WARN_ON(timekeeping_suspended);

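        /*
         * Seqlock read loop: if a writer touched xtime while we were
         * sampling, read_seqretry() reports a change and the whole
         * snapshot is retried.
         */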
        do {
                seq = read_seqbegin(&xtime_lock);

                *ts = xtime;

                /* read clocksource: */
                cycle_now = clocksource_read(clock);

                /* calculate the delta since the last update_wall_time: */
                cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

                /* convert to nanoseconds: */
                nsecs = cyc2ns(clock, cycle_delta);

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts, nsecs);
}

EXPORT_SYMBOL(getnstimeofday);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        getnstimeofday(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
        struct timespec ts_delta;
        unsigned long flags;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);

        clocksource_forward_now();

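        /*
         * Setting the wall clock must not move CLOCK_MONOTONIC, so the
         * delta applied to xtime is subtracted from wall_to_monotonic.
         */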
        ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
        ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
        wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

        xtime = *tv;

        update_xtime_cache(0);

        clock->error = 0;
        ntp_clear();

        update_vsyscall(&xtime, clock);

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}

EXPORT_SYMBOL(do_settimeofday);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static void change_clocksource(void)
{
        struct clocksource *new, *old;

        new = clocksource_get_next();

        if (clock == new)
                return;

        clocksource_forward_now();

        if (clocksource_enable(new))
                return;

        new->raw_time = clock->raw_time;
        old = clock;
        clock = new;
        clocksource_disable(old);

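        /*
         * Clear cycle_last before the first read: on some platforms
         * (e.g. TSC) the read hook compares against cycle_last, which
         * must not hold a stale value from the old clocksource.
         */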
        clock->cycle_last = 0;
        clock->cycle_last = clocksource_read(clock);
        clock->error = 0;
        clock->xtime_nsec = 0;
        clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);

        tick_clock_notify();

        /*
         * We're holding xtime lock and waking up klogd would deadlock
         * us on enqueue. So no printing!
        printk(KERN_INFO "Time: %s clocksource has been installed.\n",
               clock->name);
         */
}
#else
static inline void clocksource_forward_now(void) { }
static inline void change_clocksource(void) { }
#endif

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
        unsigned long seq;
        s64 nsecs;
        cycle_t cycle_now, cycle_delta;

        do {
                seq = read_seqbegin(&xtime_lock);

                /* read clocksource: */
                cycle_now = clocksource_read(clock);

                /* calculate the delta since the last update_wall_time: */
                cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

                /* convert to nanoseconds: */
                nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;

                *ts = clock->raw_time;

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);


/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        unsigned long seq;
        int ret;

        do {
                seq = read_seqbegin(&xtime_lock);

                ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqretry(&xtime_lock, seq));

        return ret;
}

/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
        return 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        unsigned long flags;
        unsigned long sec = read_persistent_clock();

        write_seqlock_irqsave(&xtime_lock, flags);

        ntp_init();

        clock = clocksource_get_next();
        clocksource_enable(clock);
        clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
        clock->cycle_last = clocksource_read(clock);

        xtime.tv_sec = sec;
        xtime.tv_nsec = 0;
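        /*
         * Monotonic time starts at zero: initialize wall_to_monotonic to
         * the negated wall time, normalized so tv_nsec stays positive.
         */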
        set_normalized_timespec(&wall_to_monotonic,
                -xtime.tv_sec, -xtime.tv_nsec);
        update_xtime_cache(0);
        total_sleep_time = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
        unsigned long flags;
        unsigned long now = read_persistent_clock();

        clocksource_resume();

        write_seqlock_irqsave(&xtime_lock, flags);

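        /*
         * If the persistent clock advanced while we were suspended,
         * inject the sleep time into xtime and back it out of
         * wall_to_monotonic so boot-based time keeps advancing.
         */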
        if (now && (now > timekeeping_suspend_time)) {
                unsigned long sleep_length = now - timekeeping_suspend_time;

                xtime.tv_sec += sleep_length;
                wall_to_monotonic.tv_sec -= sleep_length;
                total_sleep_time += sleep_length;
        }
        update_xtime_cache(0);
        /* re-base the last cycle value */
        clock->cycle_last = 0;
        clock->cycle_last = clocksource_read(clock);
        clock->error = 0;
        timekeeping_suspended = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        hres_timers_resume();

        return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
        unsigned long flags;

        timekeeping_suspend_time = read_persistent_clock();

        write_seqlock_irqsave(&xtime_lock, flags);
        clocksource_forward_now();
        timekeeping_suspended = 1;
        write_sequnlock_irqrestore(&xtime_lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

        return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
        .name		= "timekeeping",
        .resume		= timekeeping_resume,
        .suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
        .id		= 0,
        .cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
        int error = sysdev_class_register(&timekeeping_sysclass);
        if (!error)
                error = sysdev_register(&device_timer);
        return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
                                                 s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error.  The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here.  This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
        tick_error -= clock->xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value. */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
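        /*
         * Scale the adjustment up by powers of two until the remaining
         * error fits within a single (scaled) interval.
         */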
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(s64 offset)
{
        s64 error, interval = clock->cycle_interval;
        int adj;

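        /*
         * Scale the accumulated error from NTP_SCALE_SHIFT fixed point
         * down to the clocksource's shift, keeping one extra bit.
         */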
        error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
        if (error > interval) {
                error >>= 2;
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = clocksource_bigadjust(error, &interval, &offset);
        } else if (error < -interval) {
                error >>= 2;
                if (likely(error >= -interval)) {
                        adj = -1;
                        interval = -interval;
                        offset = -offset;
                } else
                        adj = clocksource_bigadjust(error, &interval, &offset);
        } else
                return;

        clock->mult += adj;
        clock->xtime_interval += interval;
        clock->xtime_nsec -= offset;
        clock->error -= (interval - offset) <<
                        (NTP_SCALE_SHIFT - clock->shift);
}

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
        cycle_t offset;

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                return;

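        /*
         * With GENERIC_TIME, measure the cycles actually elapsed since
         * the last update; otherwise assume exactly one tick interval.
         */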
#ifdef CONFIG_GENERIC_TIME
        offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
        offset = clock->cycle_interval;
#endif
        clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;

        /* normally this loop will run just once, however in the
         * case of lost or late ticks, it will accumulate correctly.
         */
        while (offset >= clock->cycle_interval) {
                /* accumulate one interval */
                offset -= clock->cycle_interval;
                clock->cycle_last += clock->cycle_interval;

                clock->xtime_nsec += clock->xtime_interval;
                if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
                        clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
                        xtime.tv_sec++;
                        second_overflow();
                }

                clock->raw_time.tv_nsec += clock->raw_interval;
                if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
                        clock->raw_time.tv_nsec -= NSEC_PER_SEC;
                        clock->raw_time.tv_sec++;
                }

                /* accumulate error between NTP and clock interval */
                clock->error += tick_length;
                clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
        }

        /* correct the clock when NTP error is too big */
        clocksource_adjust(offset);

        /*
         * Since in the loop above we accumulate any amount of time
         * in xtime_nsec over a second into xtime.tv_sec, it's possible for
         * xtime_nsec to be fairly small after the loop. Further, if we're
         * slightly speeding the clocksource up in clocksource_adjust(),
         * it's possible the required corrective factor to xtime_nsec could
         * cause it to underflow.
         *
         * Now, we cannot simply roll the accumulated second back, since
         * the NTP subsystem has been notified via second_overflow. So
         * instead we push xtime_nsec forward by the amount we underflowed,
         * and add that amount into the error.
         *
         * We'll correct this error next time through this function, when
         * xtime_nsec is not as small.
         */
        if (unlikely((s64)clock->xtime_nsec < 0)) {
                s64 neg = -(s64)clock->xtime_nsec;
                clock->xtime_nsec = 0;
                clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
        }

        /* store full nanoseconds into xtime after rounding it up and
         * add the remainder to the error difference.
         */
        xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
        clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
        clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);

        update_xtime_cache(cyc2ns(clock, offset));

        /* check to see if there is a new clocksource to use */
        change_clocksource();
        update_vsyscall(&xtime, clock);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-clock time at which the system booted, in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
        set_normalized_timespec(ts,
                - (wall_to_monotonic.tv_sec + total_sleep_time),
                - wall_to_monotonic.tv_nsec);
}

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
        ts->tv_sec += total_sleep_time;
}

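/*
 * xtime_cache is read locklessly here; a single tv_sec word is assumed
 * to be read atomically, which is sufficient for coarse seconds-only
 * users.
 */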
unsigned long get_seconds(void)
{
        return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);


struct timespec current_kernel_time(void)
{
        struct timespec now;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);

                now = xtime_cache;
        } while (read_seqretry(&xtime_lock, seq));

        return now;
}
EXPORT_SYMBOL(current_kernel_time);