0ce0e6f76eb07e3a572f8b519fa1a183b9e797e5
[deliverable/linux.git] / arch / arc / kernel / time.c
1 /*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
 8  * vineetg: Jan 2011
9 * -sched_clock( ) no longer jiffies based. Uses the same clocksource
10 * as gtod
11 *
12 * Rajeshwarr/Vineetg: Mar 2008
13 * -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
14 * for arch independent gettimeofday()
15 * -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
16 *
17 * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
18 */
19
20 /* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
 21  * Each can be programmed to go from @count to @limit and optionally
22 * interrupt when that happens.
23 * A write to Control Register clears the Interrupt
24 *
25 * We've designated TIMER0 for events (clockevents)
26 * while TIMER1 for free running (clocksource)
27 *
28 * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
29 */
30
31 #include <linux/spinlock.h>
32 #include <linux/interrupt.h>
33 #include <linux/module.h>
34 #include <linux/sched.h>
35 #include <linux/kernel.h>
36 #include <linux/interrupt.h>
37 #include <linux/time.h>
38 #include <linux/init.h>
39 #include <linux/timex.h>
40 #include <linux/profile.h>
41 #include <linux/clocksource.h>
42 #include <linux/clockchips.h>
43 #include <asm/irq.h>
44 #include <asm/arcregs.h>
45 #include <asm/clk.h>
46 #include <asm/mach_desc.h>
47
48 #define ARC_TIMER_MAX 0xFFFFFFFF
49
50 /********** Clock Source Device *********/
51
52 #ifdef CONFIG_ARC_HAS_RTSC
53
54 int __cpuinit arc_counter_setup(void)
55 {
56 /* RTSC insn taps into cpu clk, needs no setup */
57
58 /* For SMP, only allowed if cross-core-sync, hence usable as cs */
59 return 1;
60 }
61
/*
 * Snapshot the 64bit RTSC cycle counter.
 *
 * The low word comes from the "rtsc" insn itself; the high word (TSCH) is
 * extension core reg 58, fetched with a plain "mov". Local IRQs are held
 * off across the two reads so both halves belong to the same instant.
 */
static cycle_t arc_counter_read(struct clocksource *cs)
{
	unsigned long flags;
	/* endian-aware overlay so {low,high} can be returned as one 64bit */
	union {
#ifdef CONFIG_CPU_BIG_ENDIAN
		struct { u32 high, low; };
#else
		struct { u32 low, high; };
#endif
		cycle_t full;
	} stamp;

	/* keep the low/high snapshot atomic w.r.t. local interrupts */
	flags = arch_local_irq_save();

	__asm__ __volatile(
	"	.extCoreRegister tsch, 58, r, cannot_shortcut	\n"
	"	rtsc %0, 0	\n"
	"	mov  %1, tsch	\n"	/* TSCH is extn core reg 58 */
	: "=r" (stamp.low), "=r" (stamp.high));

	arch_local_irq_restore(flags);

	return stamp.full;
}
86
/* RTSC backed clocksource: full 64bit mask, continuous (never stops) */
static struct clocksource arc_counter = {
	.name = "ARC RTSC",
	.rating = 300,
	.read = arc_counter_read,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
94
95 #else /* !CONFIG_ARC_HAS_RTSC */
96
/*
 * TIMER1 is a core-private counter: in SMP the per-core instances are not
 * synchronized, so it cannot back a system-wide clocksource there.
 */
static bool is_usable_as_clocksource(void)
{
#ifdef CONFIG_SMP
	return false;
#else
	return true;
#endif
}
105
/*
 * set 32bit TIMER1 to keep counting monotonically and wraparound
 */
int __cpuinit arc_counter_setup(void)
{
	/* count 0..0xFFFFFFFF then wrap back to 0 */
	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
	/* NH: count only while cpu not halted; no IE, so no interrupts */
	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);

	/* tell caller whether this counter may serve as clocksource */
	return is_usable_as_clocksource();
}
117
118 static cycle_t arc_counter_read(struct clocksource *cs)
119 {
120 return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
121 }
122
/* TIMER1 backed clocksource: only 32bit wide, continuous (never stops) */
static struct clocksource arc_counter = {
	.name = "ARC Timer1",
	.rating = 300,
	.read = arc_counter_read,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
130
131 #endif
132
133 /********** Clock Event Device *********/
134
/*
 * Arm the timer to interrupt after @limit cycles
 * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
 */
static void arc_timer_event_setup(unsigned int limit)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, limit);
	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */

	/* IE: interrupt on reaching @limit; NH: count only when not halted */
	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}
146
/*
 * Acknowledge the interrupt (oneshot) and optionally re-arm it (periodic)
 * -Any write to CTRL Reg will ack the intr (NH bit: Count when not halted)
 * -Rearming is done by setting the IE bit
 *
 * Small optimisation: Normal code would have been
 * if (irq_reenable)
 * CTRL_REG = (IE | NH);
 * else
 * CTRL_REG = NH;
 * However since IE is BIT0 we can fold the branch
 */
static void arc_timer_event_ack(unsigned int irq_reenable)
{
	/* @irq_reenable is 0 or 1 (see caller), ORed directly into IE/bit0 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
}
163
/* oneshot: arm TIMER0 to fire after @delta cycles */
static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}
170
171 static void arc_clkevent_set_mode(enum clock_event_mode mode,
172 struct clock_event_device *dev)
173 {
174 switch (mode) {
175 case CLOCK_EVT_MODE_PERIODIC:
176 arc_timer_event_setup(arc_get_core_freq() / HZ);
177 break;
178 case CLOCK_EVT_MODE_ONESHOT:
179 break;
180 default:
181 break;
182 }
183
184 return;
185 }
186
/*
 * Per-cpu TIMER0 clockevent device, oneshot and periodic capable.
 * mult/shift/max_delta_ns are filled in later at registration time.
 */
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
	.name = "ARC Timer0",
	.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
	.mode = CLOCK_EVT_MODE_UNUSED,
	.rating = 300,
	.irq = TIMER0_IRQ, /* hardwired, no need for resources */
	.set_next_event = arc_clkevent_set_next_event,
	.set_mode = arc_clkevent_set_mode,
};
196
197 static irqreturn_t timer_irq_handler(int irq, void *dev_id)
198 {
199 struct clock_event_device *clk = &__get_cpu_var(arc_clockevent_device);
200
201 arc_timer_event_ack(clk->mode == CLOCK_EVT_MODE_PERIODIC);
202 clk->event_handler(clk);
203 return IRQ_HANDLED;
204 }
205
/*
 * Shared irqaction for TIMER0 on all cpus: IRQF_TIMER marks it as the
 * timer interrupt, IRQF_PERCPU since the handler is set up per cpu
 * (see arc_local_timer_setup)
 */
static struct irqaction arc_timer_irq = {
	.name = "Timer0 (clock-evt-dev)",
	.flags = IRQF_TIMER | IRQF_PERCPU,
	.handler = timer_irq_handler,
};
211
212 /*
213 * Setup the local event timer for @cpu
214 * N.B. weak so that some exotic ARC SoCs can completely override it
215 */
216 void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu)
217 {
218 struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
219
220 clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
221
222 clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
223 clk->cpumask = cpumask_of(cpu);
224
225 clockevents_register_device(clk);
226
227 /*
228 * setup the per-cpu timer IRQ handler - for all cpus
229 * For non boot CPU explicitly unmask at intc
230 * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
231 */
232 if (!cpu)
233 setup_irq(TIMER0_IRQ, &arc_timer_irq);
234 else
235 arch_unmask_irq(TIMER0_IRQ);
236 }
237
238 /*
239 * Called from start_kernel() - boot CPU only
240 *
241 * -Sets up h/w timers as applicable on boot cpu
242 * -Also sets up any global state needed for timer subsystem:
243 * - for "counting" timer, registers a clocksource, usable across CPUs
244 * (provided that underlying counter h/w is synchronized across cores)
245 * - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
246 */
247 void __init time_init(void)
248 {
249 /*
250 * sets up the timekeeping free-flowing counter which also returns
251 * whether the counter is usable as clocksource
252 */
253 if (arc_counter_setup())
254 /*
255 * CLK upto 4.29 GHz can be safely represented in 32 bits
256 * because Max 32 bit number is 4,294,967,295
257 */
258 clocksource_register_hz(&arc_counter, arc_get_core_freq());
259
260 /* sets up the periodic event timer */
261 arc_local_timer_setup(smp_processor_id());
262
263 if (machine_desc->init_time)
264 machine_desc->init_time();
265 }
266
267 #ifdef CONFIG_ARC_HAS_RTSC
268 /*
269 * sched_clock math assist
270 * ns = cycles * (ns_per_sec / cpu_freq_hz)
271 * ns = cycles * (10^6 / cpu_freq_khz)
272 * ns = cycles * (10^6 * 2^SF / cpu_freq_khz) / 2^SF
273 * ns = cycles * cyc2ns_scale >> SF
274 */
275 #define CYC2NS_SF 10 /* 2^10, carefully chosen */
276 #define CYC2NS_SCALE ((1000000 << CYC2NS_SF) / (arc_get_core_freq() / 1000))
277
278 static unsigned long long cycles2ns(unsigned long long cyc)
279 {
280 return (cyc * CYC2NS_SCALE ) >> CYC2NS_SF;
281 }
282
283 /*
284 * Scheduler clock - a monotonically increasing clock in nanosec units.
285 * It's return value must NOT wrap around.
286 *
287 * - Since 32bit TIMER1 will overflow almost immediately (53sec @ 80MHz), it
288 * can't be used directly.
289 * - Using getrawmonotonic (TIMER1 based, but with state for last + current
290 * snapshots), is no-good either because of seqlock deadlock possibilities
291 * - So only with native 64bit timer we do this, otherwise fallback to generic
292  *   jiffies based version - which despite not being fine grained guarantees
293 * the monotonically increasing semantics.
294 */
295 unsigned long long sched_clock(void)
296 {
297 return cycles2ns(arc_counter_read(NULL));
298 }
299 #endif
This page took 0.091336 seconds and 4 git commands to generate.