/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>

#include <asm/sched_clock.h>

struct clock_data {
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	u32 mult;
	u32 shift;
	bool suspended;
	bool needs_suspend;
};

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u32 __read_mostly sched_clock_mask = 0xffffffff;

static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

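/*
 * Cycle-to-ns conversion is ns = (cyc * mult) >> shift, where
 * mult / 2^shift approximates NSEC_PER_SEC / rate.  Illustrative example
 * (values not computed by this file): for a 32768 Hz counter, one valid
 * pair is mult = 1000000000 and shift = 15, since
 * (cyc * 1000000000) >> 15 == cyc * 30517.578125 ns, exactly one counter
 * period; clocks_calc_mult_shift() may choose a different but equivalent
 * pair.
 */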
static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc_copy = cyc;
	raw_local_irq_restore(flags);
}

static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
}

void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
		unsigned long rate)
{
	setup_sched_clock(read, bits, rate);
	cd.needs_suspend = true;
}

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	WARN_ON(read_sched_clock != jiffy_sched_clock_read);
	read_sched_clock = read;
	sched_clock_mask = (1 << bits) - 1;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

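	/*
	 * Illustrative arithmetic (values not computed by this file): with
	 * bits = 32 and rate = 24 MHz, the counter wraps after 2^32 - 1
	 * cycles, roughly 178956 ms, so the poll timer below is armed for
	 * about 90% of that (~161 s) to refresh the epoch well before a
	 * wrap could be missed.
	 */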
	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	pr_debug("Registered %pF as sched_clock source\n", read);
}

unsigned long long notrace sched_clock(void)
{
	u32 cyc = read_sched_clock();
	return cyc_to_sched_clock(cyc, sched_clock_mask);
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided at this point,
	 * make it the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);

	sched_clock_poll(sched_clock_timer.data);
}

static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	if (cd.needs_suspend)
		cd.suspended = true;
	return 0;
}

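/*
 * For clocks registered via setup_sched_clock_needs_suspend(): reads return
 * the frozen epoch_ns while cd.suspended is set, and the resume handler
 * below rebases epoch_cyc to the current counter value, so no time is
 * accounted for the suspended period even if the counter stopped or reset.
 */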
static void sched_clock_resume(void)
{
	if (cd.needs_suspend) {
		cd.epoch_cyc = read_sched_clock();
		cd.epoch_cyc_copy = cd.epoch_cyc;
		cd.suspended = false;
	}
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);
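
/*
 * Example usage (illustrative only, not part of this file): a platform
 * timer driver with a free-running 32-bit counter registers a read
 * callback with setup_sched_clock() from its early timer init, while
 * interrupts are still disabled (see the WARN_ON(!irqs_disabled())
 * above).  The I/O mapping, register layout and 24 MHz rate below are
 * hypothetical.
 */
#include <linux/io.h>

static void __iomem *example_counter_base;	/* hypothetical mapping */

static u32 notrace example_read_sched_clock(void)
{
	/* return the raw 32-bit free-running counter value */
	return readl_relaxed(example_counter_base);
}

static void __init example_timer_init(void)
{
	/* example_counter_base must already be ioremapped by the platform */
	setup_sched_clock(example_read_sched_clock, 32, 24000000);
}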