kernel/time/sched_clock.c
/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

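/*
 * struct clock_data - state for the generic sched_clock implementation
 * @wrap_kt:	interval at which the epoch must be refreshed, before the
 *		underlying counter wraps
 * @epoch_ns:	sched_clock() value at the last epoch update
 * @epoch_cyc:	raw counter value at the last epoch update
 * @seq:	seqcount protecting the epoch fields against torn reads
 * @rate:	counter frequency in Hz
 * @mult:	cycles-to-ns multiplier
 * @shift:	cycles-to-ns shift
 * @suspended:	when true, sched_clock() returns the frozen epoch_ns
 */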
struct clock_data {
	ktime_t wrap_kt;
	u64 epoch_ns;
	u64 epoch_cyc;
	seqcount_t seq;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};

static struct hrtimer sched_clock_timer;
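/*
 * "irqtime" boot parameter: 1 forces IRQ time accounting on, 0 forces it
 * off, and -1 (the default) enables it only when the registered clock is
 * fast enough (>= 1 MHz, see sched_clock_register()).
 */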
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u64 __read_mostly sched_clock_mask;

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

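/*
 * Legacy read functions return only 32 bits; wrap them so they can be
 * used through the 64-bit read_sched_clock interface.
 */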
static u32 __read_mostly (*read_sched_clock_32)(void);

static u64 notrace read_sched_clock_32_wrapper(void)
{
	return read_sched_clock_32();
}

static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

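/*
 * Convert a counter delta to nanoseconds: ns = (cyc * mult) >> shift,
 * with mult/shift precomputed by clocks_calc_mult_shift().
 */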
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

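/*
 * Lockless clock read: sample the epoch under the seqcount, then extend
 * it by the number of counter cycles elapsed since the epoch was set.
 * Readers are never blocked by update_sched_clock(); they simply retry
 * if an update raced with them.
 */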
unsigned long long notrace sched_clock(void)
{
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 cyc;
	unsigned long seq;

	if (cd.suspended)
		return cd.epoch_ns;

	do {
		seq = raw_read_seqcount_begin(&cd.seq);
		epoch_cyc = cd.epoch_cyc;
		epoch_ns = cd.epoch_ns;
	} while (read_seqcount_retry(&cd.seq, seq));

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u64 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);

	raw_local_irq_save(flags);
	raw_write_seqcount_begin(&cd.seq);
	cd.epoch_ns = ns;
	cd.epoch_cyc = cyc;
	raw_write_seqcount_end(&cd.seq);
	raw_local_irq_restore(flags);
}

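/*
 * hrtimer callback: refresh the epoch and re-arm the timer so that it
 * fires again before the underlying counter wraps.
 */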
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);
	return HRTIMER_RESTART;
}

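/*
 * Register a new sched_clock source: @read returns the raw counter value,
 * @bits is the counter width and @rate its frequency in Hz.  A clock
 * slower than the one already registered is ignored.
 */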
void __init sched_clock_register(u64 (*read)(void), int bits,
				 unsigned long rate)
{
	unsigned long r;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	sched_clock_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
	cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}

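/*
 * Legacy registration interface for platforms that still provide a
 * 32-bit read function; it is routed through the 64-bit wrapper above.
 */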
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	read_sched_clock_32 = read;
	sched_clock_register(read_sched_clock_32_wrapper, bits, rate);
}

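/*
 * Called late in boot: fall back to the jiffy-based clock if no better
 * source was registered, then start the wrap-avoidance timer.
 */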
void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock() function has been provided at this point,
	 * make the jiffy-based clock the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer that keeps sched_clock() properly updated and
	 * sets the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

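/*
 * On suspend, fold the time elapsed so far into the epoch and freeze
 * sched_clock() at that value until resume.
 */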
static int sched_clock_suspend(void)
{
	sched_clock_poll(&sched_clock_timer);
	cd.suspended = true;
	return 0;
}

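/*
 * On resume, restart the epoch from the current counter value so that
 * time spent suspended is not added to sched_clock().
 */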
static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);