/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * -sched_clock( ) no longer jiffies based. Uses the same clocksource
 *
 * Rajeshwarr/Vineetg: Mar 2008
 * -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
 *  for arch independent gettimeofday()
 * -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
 *
 * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
 */
/*
 * ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
 * Each can be programmed to go from @count to @limit and optionally
 * interrupt when that happens.
 * A write to Control Register clears the Interrupt
 *
 * We've designated TIMER0 for events (clockevents)
 * while TIMER1 for free running (clocksource)
 *
 * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
 */
31 #include <linux/spinlock.h>
32 #include <linux/interrupt.h>
33 #include <linux/module.h>
34 #include <linux/sched.h>
35 #include <linux/kernel.h>
36 #include <linux/interrupt.h>
37 #include <linux/time.h>
38 #include <linux/init.h>
39 #include <linux/timex.h>
40 #include <linux/profile.h>
41 #include <linux/clocksource.h>
42 #include <linux/clockchips.h>
44 #include <asm/arcregs.h>
46 #include <asm/mach_desc.h>
/* Timers are 32-bit: max count value, also TIMER1's wraparound limit */
#define ARC_TIMER_MAX	0xFFFFFFFF
50 /********** Clock Source Device *********/
52 #ifdef CONFIG_ARC_HAS_RTSC
54 int __cpuinit
arc_counter_setup(void)
56 /* RTSC insn taps into cpu clk, needs no setup */
58 /* For SMP, only allowed if cross-core-sync, hence usable as cs */
62 static cycle_t
arc_counter_read(struct clocksource
*cs
)
66 #ifdef CONFIG_CPU_BIG_ENDIAN
67 struct { u32 high
, low
; };
69 struct { u32 low
, high
; };
74 flags
= arch_local_irq_save();
77 " .extCoreRegister tsch, 58, r, cannot_shortcut \n"
79 " mov %1, tsch \n" /* TSCH is extn core reg 58 */
80 : "=r" (stamp
.low
), "=r" (stamp
.high
));
82 arch_local_irq_restore(flags
);
87 static struct clocksource arc_counter
= {
90 .read
= arc_counter_read
,
91 .mask
= CLOCKSOURCE_MASK(64),
92 .flags
= CLOCK_SOURCE_IS_CONTINUOUS
,
95 #else /* !CONFIG_ARC_HAS_RTSC */
97 static bool is_usable_as_clocksource(void)
107 * set 32bit TIMER1 to keep counting monotonically and wraparound
109 int __cpuinit
arc_counter_setup(void)
111 write_aux_reg(ARC_REG_TIMER1_LIMIT
, ARC_TIMER_MAX
);
112 write_aux_reg(ARC_REG_TIMER1_CNT
, 0);
113 write_aux_reg(ARC_REG_TIMER1_CTRL
, TIMER_CTRL_NH
);
115 return is_usable_as_clocksource();
118 static cycle_t
arc_counter_read(struct clocksource
*cs
)
120 return (cycle_t
) read_aux_reg(ARC_REG_TIMER1_CNT
);
123 static struct clocksource arc_counter
= {
124 .name
= "ARC Timer1",
126 .read
= arc_counter_read
,
127 .mask
= CLOCKSOURCE_MASK(32),
128 .flags
= CLOCK_SOURCE_IS_CONTINUOUS
,
133 /********** Clock Event Device *********/
136 * Arm the timer to interrupt after @limit cycles
137 * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
139 static void arc_timer_event_setup(unsigned int limit
)
141 write_aux_reg(ARC_REG_TIMER0_LIMIT
, limit
);
142 write_aux_reg(ARC_REG_TIMER0_CNT
, 0); /* start from 0 */
144 write_aux_reg(ARC_REG_TIMER0_CTRL
, TIMER_CTRL_IE
| TIMER_CTRL_NH
);
148 * Acknowledge the interrupt (oneshot) and optionally re-arm it (periodic)
149 * -Any write to CTRL Reg will ack the intr (NH bit: Count when not halted)
150 * -Rearming is done by setting the IE bit
152 * Small optimisation: Normal code would have been
154 * CTRL_REG = (IE | NH);
157 * However since IE is BIT0 we can fold the branch
159 static void arc_timer_event_ack(unsigned int irq_reenable
)
161 write_aux_reg(ARC_REG_TIMER0_CTRL
, irq_reenable
| TIMER_CTRL_NH
);
/*
 * clockevents callback: program TIMER0 to fire after @delta cycles.
 * Always succeeds (returns 0), as the hardware accepts any 32-bit delta.
 */
static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}
171 static void arc_clkevent_set_mode(enum clock_event_mode mode
,
172 struct clock_event_device
*dev
)
175 case CLOCK_EVT_MODE_PERIODIC
:
176 arc_timer_event_setup(arc_get_core_freq() / HZ
);
178 case CLOCK_EVT_MODE_ONESHOT
:
187 static DEFINE_PER_CPU(struct clock_event_device
, arc_clockevent_device
) = {
188 .name
= "ARC Timer0",
189 .features
= CLOCK_EVT_FEAT_ONESHOT
| CLOCK_EVT_FEAT_PERIODIC
,
190 .mode
= CLOCK_EVT_MODE_UNUSED
,
192 .irq
= TIMER0_IRQ
, /* hardwired, no need for resources */
193 .set_next_event
= arc_clkevent_set_next_event
,
194 .set_mode
= arc_clkevent_set_mode
,
197 static irqreturn_t
timer_irq_handler(int irq
, void *dev_id
)
199 struct clock_event_device
*clk
= &__get_cpu_var(arc_clockevent_device
);
201 arc_timer_event_ack(clk
->mode
== CLOCK_EVT_MODE_PERIODIC
);
202 clk
->event_handler(clk
);
206 static struct irqaction arc_timer_irq
= {
207 .name
= "Timer0 (clock-evt-dev)",
208 .flags
= IRQF_TIMER
| IRQF_PERCPU
,
209 .handler
= timer_irq_handler
,
213 * Setup the local event timer for @cpu
214 * N.B. weak so that some exotic ARC SoCs can completely override it
216 void __attribute__((weak
)) __cpuinit
arc_local_timer_setup(unsigned int cpu
)
218 struct clock_event_device
*clk
= &per_cpu(arc_clockevent_device
, cpu
);
220 clockevents_calc_mult_shift(clk
, arc_get_core_freq(), 5);
222 clk
->max_delta_ns
= clockevent_delta2ns(ARC_TIMER_MAX
, clk
);
223 clk
->cpumask
= cpumask_of(cpu
);
225 clockevents_register_device(clk
);
228 * setup the per-cpu timer IRQ handler - for all cpus
229 * For non boot CPU explicitly unmask at intc
230 * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
233 setup_irq(TIMER0_IRQ
, &arc_timer_irq
);
235 arch_unmask_irq(TIMER0_IRQ
);
239 * Called from start_kernel() - boot CPU only
241 * -Sets up h/w timers as applicable on boot cpu
242 * -Also sets up any global state needed for timer subsystem:
243 * - for "counting" timer, registers a clocksource, usable across CPUs
244 * (provided that underlying counter h/w is synchronized across cores)
245 * - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
247 void __init
time_init(void)
250 * sets up the timekeeping free-flowing counter which also returns
251 * whether the counter is usable as clocksource
253 if (arc_counter_setup())
255 * CLK upto 4.29 GHz can be safely represented in 32 bits
256 * because Max 32 bit number is 4,294,967,295
258 clocksource_register_hz(&arc_counter
, arc_get_core_freq());
260 /* sets up the periodic event timer */
261 arc_local_timer_setup(smp_processor_id());
263 if (machine_desc
->init_time
)
264 machine_desc
->init_time();
267 #ifdef CONFIG_ARC_HAS_RTSC
/*
 * sched_clock math assist
 * ns = cycles * (ns_per_sec / cpu_freq_hz)
 * ns = cycles * (10^6 / cpu_freq_khz)
 * ns = cycles * (10^6 * 2^SF / cpu_freq_khz) / 2^SF
 * ns = cycles * cyc2ns_scale >> SF
 */
#define CYC2NS_SF	10	/* 2^10, carefully chosen */
#define CYC2NS_SCALE	((1000000 << CYC2NS_SF) / (arc_get_core_freq() / 1000))

/* Convert raw counter cycles to nanoseconds via fixed-point scale above */
static unsigned long long cycles2ns(unsigned long long cyc)
{
	return (cyc * CYC2NS_SCALE) >> CYC2NS_SF;
}
284 * Scheduler clock - a monotonically increasing clock in nanosec units.
285 * It's return value must NOT wrap around.
287 * - Since 32bit TIMER1 will overflow almost immediately (53sec @ 80MHz), it
288 * can't be used directly.
289 * - Using getrawmonotonic (TIMER1 based, but with state for last + current
290 * snapshots), is no-good either because of seqlock deadlock possibilities
291 * - So only with native 64bit timer we do this, otherwise fallback to generic
292 * jiffies based version - which despite not being fine grained gaurantees
293 * the monotonically increasing semantics.
295 unsigned long long sched_clock(void)
297 return cycles2ns(arc_counter_read(NULL
));