Commit | Line | Data |
---|---|---|
734efb46 | 1 | /* linux/include/linux/clocksource.h |
2 | * | |
3 | * This file contains the structure definitions for clocksources. | |
4 | * | |
5 | * If you are not a clocksource, or timekeeping code, you should | |
6 | * not be including this file! | |
7 | */ | |
8 | #ifndef _LINUX_CLOCKSOURCE_H | |
9 | #define _LINUX_CLOCKSOURCE_H | |
10 | ||
11 | #include <linux/types.h> | |
12 | #include <linux/timex.h> | |
13 | #include <linux/time.h> | |
14 | #include <linux/list.h> | |
329c8d84 | 15 | #include <linux/cache.h> |
5d8b34fd | 16 | #include <linux/timer.h> |
734efb46 | 17 | #include <asm/div64.h> |
18 | #include <asm/io.h> | |
19 | ||
20 | /* clocksource cycle base type */ | |
21 | typedef u64 cycle_t; | |
5d8b34fd | 22 | struct clocksource; |
734efb46 | 23 | |
a038a353 | 24 | /** |
25 | * struct cyclecounter - hardware abstraction for a free running counter | |
26 | * Provides completely state-free accessors to the underlying hardware. | |
27 | * Depending on which hardware it reads, the cycle counter may wrap | |
28 | * around quickly. Locking rules (if necessary) have to be defined | |
29 | * by the implementor and user of specific instances of this API. | |
30 | * | |
31 | * @read: returns the current cycle value | |
32 | * @mask: bitmask for two's complement | |
33 | * subtraction of non 64 bit counters, | |
34 | * see CLOCKSOURCE_MASK() helper macro | |
35 | * @mult: cycle to nanosecond multiplier | |
36 | * @shift: cycle to nanosecond divisor (power of two) | |
37 | */ | |
38 | struct cyclecounter { | |
39 | cycle_t (*read)(const struct cyclecounter *cc); | |
40 | cycle_t mask; | |
41 | u32 mult; | |
42 | u32 shift; | |
43 | }; | |
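
As an illustration of how a driver might fill this in, here is a minimal sketch wrapping a hypothetical free-running 32-bit MMIO counter. The `example_*` names, the register base, and the assumed 1 MHz frequency are all invented for the example; `CLOCKSOURCE_MASK()` and the mult/shift helpers are defined later in this header.

```c
#include <linux/clocksource.h>
#include <asm/io.h>

static void __iomem *example_counter_base;	/* assumed to be ioremap()'d elsewhere */

static cycle_t example_cc_read(const struct cyclecounter *cc)
{
	/* Free-running, monotonically increasing 32-bit hardware counter. */
	return (cycle_t)readl(example_counter_base);
}

static struct cyclecounter example_cc = {
	.read	= example_cc_read,
	.mask	= CLOCKSOURCE_MASK(32),
	/* Assumed 1 MHz counter: 1000 ns per cycle, scaled by 2^10. */
	.mult	= 1000 << 10,
	.shift	= 10,
};
```
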
44 | ||
45 | /** | |
46 | * struct timecounter - layer above a &struct cyclecounter which counts nanoseconds | |
47 | * Contains the state needed by timecounter_read() to detect | |
48 | * cycle counter wrap around. Initialize with | |
49 | * timecounter_init(). Also used to convert cycle counts into the | |
50 | * corresponding nanosecond counts with timecounter_cyc2time(). Users | |
51 | * of this code are responsible for initializing the underlying | |
52 | * cycle counter hardware, locking issues and reading the time | |
53 | * more often than the cycle counter wraps around. The nanosecond | |
54 | * counter will only wrap around after ~585 years. | |
55 | * | |
56 | * @cc: the cycle counter used by this instance | |
57 | * @cycle_last: most recent cycle counter value seen by | |
58 | * timecounter_read() | |
59 | * @nsec: continuously increasing count | |
60 | */ | |
61 | struct timecounter { | |
62 | const struct cyclecounter *cc; | |
63 | cycle_t cycle_last; | |
64 | u64 nsec; | |
65 | }; | |
66 | ||
67 | /** | |
68 | * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds | |
69 | * @cc: Pointer to cycle counter. | |
70 | * @cycles: Cycles | |
71 | * | |
72 | * XXX - This could use some mult_lxl_ll() asm optimization. Same code | |
73 | * as in cyc2ns, but with unsigned result. | |
74 | */ | |
75 | static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, | |
76 | cycle_t cycles) | |
77 | { | |
78 | u64 ret = (u64)cycles; | |
79 | ret = (ret * cc->mult) >> cc->shift; | |
80 | return ret; | |
81 | } | |
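
To make the mult/shift arithmetic above concrete, the assumed 1 MHz cyclecounter from the earlier sketch (`mult = 1000 << 10`, `shift = 10`) converts 2500 cycles as follows:

```c
static u64 example_cycles_to_ns(void)
{
	/*
	 * ret = (2500 * (1000 << 10)) >> 10
	 *     = (2500 * 1024000) >> 10
	 *     = 2560000000 >> 10
	 *     = 2500000 ns, i.e. 2.5 ms at 1 MHz.
	 */
	return cyclecounter_cyc2ns(&example_cc, 2500);
}
```
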
82 | ||
83 | /** | |
84 | * timecounter_init - initialize a time counter | |
85 | * @tc: Pointer to time counter which is to be initialized/reset | |
86 | * @cc: A cycle counter, ready to be used. | |
87 | * @start_tstamp: Arbitrary initial time stamp. | |
88 | * | |
89 | * After this call the current cycle register (roughly) corresponds to | |
90 | * the initial time stamp. Every call to timecounter_read() increments | |
91 | * the time stamp counter by the number of elapsed nanoseconds. | |
92 | */ | |
93 | extern void timecounter_init(struct timecounter *tc, | |
94 | const struct cyclecounter *cc, | |
95 | u64 start_tstamp); | |
96 | ||
97 | /** | |
98 | * timecounter_read - return nanoseconds elapsed since timecounter_init() | |
99 | * plus the initial time stamp | |
100 | * @tc: Pointer to time counter. | |
101 | * | |
102 | * In other words, keeps track of time since the same epoch as | |
103 | * the function which generated the initial time stamp. | |
104 | */ | |
105 | extern u64 timecounter_read(struct timecounter *tc); | |
106 | ||
107 | /** | |
108 | * timecounter_cyc2time - convert a cycle counter value to the same | |
109 | * time base as values returned by | |
110 | * timecounter_read() | |
111 | * @tc: Pointer to time counter. | |
112 | * @cycle_tstamp: a value returned by tc->cc->read() | |
113 | * | |
114 | * Cycle counts are converted correctly as long as they | |
115 | * fall into the interval [-1/2 max cycle count, +1/2 max cycle count], | |
116 | * with "max cycle count" == tc->cc->mask + 1. | |
117 | * | |
118 | * This allows conversion of cycle counter values which were generated | |
119 | * in the past. | |
120 | */ | |
121 | extern u64 timecounter_cyc2time(struct timecounter *tc, | |
122 | cycle_t cycle_tstamp); | |
123 | ||
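
Putting the timecounter API together, a driver could layer a timecounter on the hypothetical cyclecounter above and translate raw hardware timestamps (for example from a NIC timestamp register) into nanoseconds. This is only a usage sketch; locking and calling `timecounter_read()` often enough remain the caller's responsibility, as the comments above state.

```c
static struct timecounter example_tc;

static void example_timecounter_setup(void)
{
	/* Pick an arbitrary epoch for the nanosecond count, here 0. */
	timecounter_init(&example_tc, &example_cc, 0);
}

static u64 example_now_ns(void)
{
	/* Must be called more often than the 32-bit counter wraps around. */
	return timecounter_read(&example_tc);
}

static u64 example_stamp_to_ns(cycle_t hw_stamp)
{
	/* hw_stamp is a raw value read from the same counter hardware. */
	return timecounter_cyc2time(&example_tc, hw_stamp);
}
```
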
734efb46 | 124 | /** |
125 | * struct clocksource - hardware abstraction for a free running counter | |
126 | * Provides mostly state-free accessors to the underlying hardware. | |
a038a353 | 127 | * This is the structure used for system time. |
734efb46 | 128 | * |
129 | * @name: ptr to clocksource name | |
130 | * @list: list head for registration | |
131 | * @rating: rating value for selection (higher is better) | |
132 | * To avoid rating inflation the following | |
133 | * list should give you a guide as to how | |
134 | * to assign your clocksource a rating | |
135 | * 1-99: Unfit for real use | |
136 | * Only available for bootup and testing purposes. | |
137 | * 100-199: Base level usability. | |
138 | * Functional for real use, but not desired. | |
139 | * 200-299: Good. | |
140 | * A correct and usable clocksource. | |
141 | * 300-399: Desired. | |
142 | * A reasonably fast and accurate clocksource. | |
143 | * 400-499: Perfect | |
144 | * The ideal clocksource. A must-use where | |
145 | * available. | |
8e19608e | 146 | * @read: returns a cycle value, passes clocksource as argument |
4614e6ad | 147 | * @enable: optional function to enable the clocksource |
148 | * @disable: optional function to disable the clocksource | |
734efb46 | 149 | * @mask: bitmask for two's complement |
150 | * subtraction of non 64 bit counters | |
1aa5dfb7 | 151 | * @mult: cycle to nanosecond multiplier (adjusted by NTP) |
152 | * @mult_orig: cycle to nanosecond multiplier (unadjusted by NTP) | |
734efb46 | 153 | * @shift: cycle to nanosecond divisor (power of two) |
73b08d2a | 154 | * @flags: flags describing special properties |
acc9a9dc | 155 | * @vread: vsyscall based read |
b52f52a0 | 156 | * @resume: resume function for the clocksource, if necessary |
19923c19 | 157 | * @cycle_interval: Used internally by timekeeping core, please ignore. |
158 | * @xtime_interval: Used internally by timekeeping core, please ignore. | |
734efb46 | 159 | */ |
160 | struct clocksource { | |
329c8d84 | 161 | /* |
162 | * First part of structure is read mostly | |
163 | */ | |
734efb46 | 164 | char *name; |
165 | struct list_head list; | |
166 | int rating; | |
8e19608e | 167 | cycle_t (*read)(struct clocksource *cs); |
4614e6ad | 168 | int (*enable)(struct clocksource *cs); |
169 | void (*disable)(struct clocksource *cs); | |
734efb46 | 170 | cycle_t mask; |
171 | u32 mult; | |
1aa5dfb7 | 172 | u32 mult_orig; |
734efb46 | 173 | u32 shift; |
73b08d2a | 174 | unsigned long flags; |
acc9a9dc | 175 | cycle_t (*vread)(void); |
b52f52a0 | 176 | void (*resume)(void); |
0aa366f3 | 177 | #ifdef CONFIG_IA64 |
178 | void *fsys_mmio; /* used by fsyscall asm code */ | |
179 | #define CLKSRC_FSYS_MMIO_SET(mmio, addr) ((mmio) = (addr)) | |
180 | #else | |
181 | #define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0) | |
182 | #endif | |
734efb46 | 183 | |
184 | /* timekeeping specific data, ignore */ | |
329c8d84 | 185 | cycle_t cycle_interval; |
186 | u64 xtime_interval; | |
2d42244a | 187 | u32 raw_interval; |
329c8d84 | 188 | /* |
189 | * Second part is written at each timer interrupt | |
190 | * Keep it in a different cache line to dirty no | |
191 | * more than one cache line. | |
192 | */ | |
193 | cycle_t cycle_last ____cacheline_aligned_in_smp; | |
194 | u64 xtime_nsec; | |
19923c19 | 195 | s64 error; |
2d42244a | 196 | struct timespec raw_time; |
5d8b34fd | 197 | |
198 | #ifdef CONFIG_CLOCKSOURCE_WATCHDOG | |
199 | /* Watchdog related data, used by the framework */ | |
200 | struct list_head wd_list; | |
201 | cycle_t wd_last; | |
202 | #endif | |
734efb46 | 203 | }; |
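
For comparison with the cyclecounter above, a minimal system clocksource built on the same kind of hypothetical hardware timer could look like the sketch below. Name, rating and frequency are illustrative only; `mult` is normally computed at init time with `clocksource_hz2mult()` or `clocksource_khz2mult()` (defined further down) and the structure is then passed to `clocksource_register()`.

```c
static cycle_t example_cs_read(struct clocksource *cs)
{
	return (cycle_t)readl(example_counter_base);
}

static struct clocksource example_clocksource = {
	.name	= "example-timer",		/* hypothetical name */
	.rating	= 200,				/* "Good": correct and usable */
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 20,
	/* .mult is filled in at init time from the measured frequency. */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
```
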
204 | ||
7dffa3c6 | 205 | extern struct clocksource *clock; /* current clocksource */ |
206 | ||
73b08d2a | 207 | /* |
208 | * Clock source flags bits: | |
209 | */ | |
5d8b34fd | 210 | #define CLOCK_SOURCE_IS_CONTINUOUS 0x01 |
211 | #define CLOCK_SOURCE_MUST_VERIFY 0x02 | |
212 | ||
213 | #define CLOCK_SOURCE_WATCHDOG 0x10 | |
214 | #define CLOCK_SOURCE_VALID_FOR_HRES 0x20 | |
73b08d2a | 215 | |
7f9f303a | 216 | /* simplify initialization of mask field */ |
1d76c262 | 217 | #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) |
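
For example (assuming the usual 64-bit `cycle_t`), the macro expands to the values noted below, which is what makes the masked two's-complement subtraction of a narrow counter work:

```c
/*
 *   CLOCKSOURCE_MASK(32) == 0x00000000ffffffffULL
 *   CLOCKSOURCE_MASK(64) == 0xffffffffffffffffULL
 */
static const cycle_t example_mask = CLOCKSOURCE_MASK(32);
```
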
734efb46 | 218 | |
219 | /** | |
220 | * clocksource_khz2mult - calculates mult from khz and shift | |
221 | * @khz: Clocksource frequency in KHz | |
222 | * @shift_constant: Clocksource shift factor | |
223 | * | |
224 | * Helper function that converts a khz counter frequency to a clocksource | |
225 | * multiplier, given the clocksource shift value. | |
226 | */ | |
227 | static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant) | |
228 | { | |
229 | /* khz = cyc/(Million ns) | |
230 | * mult/2^shift = ns/cyc | |
231 | * mult = ns/cyc * 2^shift | |
232 | * mult = 1Million/khz * 2^shift | |
233 | * mult = 1000000 * 2^shift / khz | |
234 | * mult = (1000000<<shift) / khz | |
235 | */ | |
236 | u64 tmp = ((u64)1000000) << shift_constant; | |
237 | ||
238 | tmp += khz/2; /* round for do_div */ | |
239 | do_div(tmp, khz); | |
240 | ||
241 | return (u32)tmp; | |
242 | } | |
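
As a worked instance of the comment above, take an assumed 500 kHz clocksource and a shift of 20 (each cycle is 2000 ns):

```c
static u32 example_khz_mult(void)
{
	/*
	 * tmp  = 1000000 << 20 = 1048576000000
	 * tmp += 500 / 2       = 1048576000250
	 * tmp /= 500           = 2097152000   (== 2000 * 2^20)
	 *
	 * so one cycle converts back to (2097152000 >> 20) = 2000 ns.
	 */
	return clocksource_khz2mult(500, 20);
}
```
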
243 | ||
244 | /** | |
245 | * clocksource_hz2mult - calculates mult from hz and shift | |
246 | * @hz: Clocksource frequency in Hz | |
247 | * @shift_constant: Clocksource shift factor | |
248 | * | |
249 | * Helper function that converts a hz counter | |
250 | * frequency to a clocksource multiplier, given the | |
251 | * clocksource shift value. | |
252 | */ | |
253 | static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) | |
254 | { | |
255 | /* hz = cyc/(Billion ns) | |
256 | * mult/2^shift = ns/cyc | |
257 | * mult = ns/cyc * 2^shift | |
258 | * mult = 1Billion/hz * 2^shift | |
259 | * mult = 1000000000 * 2^shift / hz | |
260 | * mult = (1000000000<<shift) / hz | |
261 | */ | |
262 | u64 tmp = ((u64)1000000000) << shift_constant; | |
263 | ||
264 | tmp += hz/2; /* round for do_div */ | |
265 | do_div(tmp, hz); | |
266 | ||
267 | return (u32)tmp; | |
268 | } | |
269 | ||
270 | /** | |
a2752549 | 271 | * clocksource_read: - Access the clocksource's current cycle value |
734efb46 | 272 | * @cs: pointer to clocksource being read |
273 | * | |
274 | * Uses the clocksource to return the current cycle_t value | |
275 | */ | |
a2752549 | 276 | static inline cycle_t clocksource_read(struct clocksource *cs) |
734efb46 | 277 | { |
8e19608e | 278 | return cs->read(cs); |
734efb46 | 279 | } |
280 | ||
4614e6ad | 281 | /** |
282 | * clocksource_enable: - enable clocksource | |
283 | * @cs: pointer to clocksource | |
284 | * | |
285 | * Enables the specified clocksource. The clocksource callback | |
286 | * function should start up the hardware and set up the mult and related | |
287 | * fields of struct clocksource to reflect hardware capabilities. | |
288 | */ | |
289 | static inline int clocksource_enable(struct clocksource *cs) | |
290 | { | |
a25cbd04 | 291 | int ret = 0; |
292 | ||
293 | if (cs->enable) | |
294 | ret = cs->enable(cs); | |
295 | ||
c7121843 | 296 | /* |
297 | * The frequency may have changed while the clocksource | |
298 | * was disabled. If so the code in ->enable() must update | |
299 | * the mult value to reflect the new frequency. Make sure | |
300 | * mult_orig follows this change. | |
301 | */ | |
a25cbd04 | 302 | cs->mult_orig = cs->mult; |
303 | ||
304 | return ret; | |
4614e6ad | 305 | } |
306 | ||
307 | /** | |
308 | * clocksource_disable: - disable clocksource | |
309 | * @cs: pointer to clocksource | |
310 | * | |
311 | * Disables the specified clocksource. The clocksource callback | |
312 | * function should power down the now unused hardware block to | |
313 | * save power. | |
314 | */ | |
315 | static inline void clocksource_disable(struct clocksource *cs) | |
316 | { | |
c7121843 | 317 | /* |
318 | * Save mult_orig in mult so clocksource_enable() can | |
319 | * restore the value regardless if ->enable() updates | |
320 | * the value of mult or not. | |
321 | */ | |
322 | cs->mult = cs->mult_orig; | |
323 | ||
4614e6ad | 324 | if (cs->disable) |
325 | cs->disable(cs); | |
326 | } | |
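
A caller outside the timekeeping core would typically pair these helpers roughly as below. This is only an illustration of the intended call order (enable, read, convert, disable), not kernel code; `pr_debug()` is assumed to be available via `<linux/kernel.h>`.

```c
static void example_sample_clocksource(struct clocksource *cs)
{
	cycle_t now;

	if (clocksource_enable(cs))
		return;				/* hardware failed to start */

	now = clocksource_read(cs);		/* raw cycles via cs->read(cs) */
	pr_debug("cycles=%llu ns=%lld\n",
		 (unsigned long long)now,
		 (long long)cyc2ns(cs, now));

	clocksource_disable(cs);		/* power the block back down */
}
```
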
327 | ||
734efb46 | 328 | /** |
329 | * cyc2ns - converts clocksource cycles to nanoseconds | |
330 | * @cs: Pointer to clocksource | |
331 | * @cycles: Cycles | |
332 | * | |
333 | * Uses the clocksource and ntp adjustment to convert cycle_ts to nanoseconds. | |
334 | * | |
335 | * XXX - This could use some mult_lxl_ll() asm optimization | |
336 | */ | |
337 | static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles) | |
338 | { | |
339 | u64 ret = (u64)cycles; | |
340 | ret = (ret * cs->mult) >> cs->shift; | |
341 | return ret; | |
342 | } | |
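
This is the conversion the timekeeping core applies to the masked difference between two reads. A sketch of that pattern (not the actual timekeeping code) looks like:

```c
static s64 example_elapsed_ns(struct clocksource *cs, cycle_t last)
{
	cycle_t now, delta;

	now = clocksource_read(cs);
	/* Two's-complement subtraction under the mask handles counter wrap. */
	delta = (now - last) & cs->mask;

	return cyc2ns(cs, delta);
}
```
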
343 | ||
344 | /** | |
a2752549 | 345 | * clocksource_calculate_interval - Calculates a clocksource interval struct |
734efb46 | 346 | * |
347 | * @c: Pointer to clocksource. | |
348 | * @length_nsec: Desired interval length in nanoseconds. | |
349 | * | |
350 | * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment | |
351 | * pair and interval request. | |
352 | * | |
353 | * Unless you're the timekeeping code, you should not be using this! | |
354 | */ | |
a2752549 | 355 | static inline void clocksource_calculate_interval(struct clocksource *c, |
f5f1a24a | 356 | unsigned long length_nsec) |
734efb46 | 357 | { |
358 | u64 tmp; | |
359 | ||
1aa5dfb7 | 360 | /* Do the ns -> cycle conversion first, using original mult */ |
734efb46 | 361 | tmp = length_nsec; |
362 | tmp <<= c->shift; | |
1aa5dfb7 | 363 | tmp += c->mult_orig/2; |
364 | do_div(tmp, c->mult_orig); | |
734efb46 | 365 | |
19923c19 | 366 | c->cycle_interval = (cycle_t)tmp; |
367 | if (c->cycle_interval == 0) | |
368 | c->cycle_interval = 1; | |
734efb46 | 369 | |
1aa5dfb7 | 370 | /* Go back from cycles -> shifted ns, this time use ntp adjusted mult */ |
19923c19 | 371 | c->xtime_interval = (u64)c->cycle_interval * c->mult; |
2d42244a | 372 | c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift; |
5eb6d205 | 373 | } |
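
Continuing the illustrative numbers (a 1 MHz clocksource with `shift` = 20, so `mult` = `mult_orig` = 1000 << 20, before any NTP adjustment), a 10 ms interval request works out as sketched in the comments below:

```c
static void example_intervals(void)
{
	/*
	 * length_nsec    = 10000000 (10 ms)
	 * cycle_interval = 10000 cycles                     (10 ms at 1 MHz)
	 * xtime_interval = 10000 * mult                     (10 ms in shifted ns)
	 * raw_interval   = (10000 * mult_orig) >> 20 = 10000000 ns
	 */
	clocksource_calculate_interval(&example_clocksource, 10000000UL);
}
```
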
374 | ||
375 | ||
734efb46 | 376 | /* used to install a new clocksource */ |
92c7e002 | 377 | extern int clocksource_register(struct clocksource*); |
4713e22c | 378 | extern void clocksource_unregister(struct clocksource*); |
7c3078b6 | 379 | extern void clocksource_touch_watchdog(void); |
92c7e002 | 380 | extern struct clocksource* clocksource_get_next(void); |
381 | extern void clocksource_change_rating(struct clocksource *cs, int rating); | |
b52f52a0 | 382 | extern void clocksource_resume(void); |
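
Tying the earlier clocksource sketch together, a platform's timer init code might compute `mult` from its (assumed) 1 MHz hardware frequency and register the clocksource roughly like this; `__init` comes from `<linux/init.h>`:

```c
static int __init example_clocksource_init(void)
{
	example_clocksource.mult =
		clocksource_hz2mult(1000000, example_clocksource.shift);

	return clocksource_register(&example_clocksource);
}
```
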
734efb46 | 383 | |
acc9a9dc | 384 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL |
385 | extern void update_vsyscall(struct timespec *ts, struct clocksource *c); | |
2c622148 | 386 | extern void update_vsyscall_tz(void); |
acc9a9dc | 387 | #else |
388 | static inline void update_vsyscall(struct timespec *ts, struct clocksource *c) | |
389 | { | |
390 | } | |
2c622148 | 391 | |
392 | static inline void update_vsyscall_tz(void) | |
393 | { | |
394 | } | |
acc9a9dc | 395 | #endif |
396 | ||
734efb46 | 397 | #endif /* _LINUX_CLOCKSOURCE_H */ |