Commit | Line | Data |
---|---|---|
aa01666d PM |
1 | /* |
2 | * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support | |
3 | * | |
57be2b48 | 4 | * Copyright (C) 2005 - 2007 Paul Mundt |
aa01666d PM |
5 | * |
6 | * TMU handling code hacked out of arch/sh/kernel/time.c | |
7 | * | |
8 | * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka | |
9 | * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> | |
10 | * Copyright (C) 2002, 2003, 2004 Paul Mundt | |
11 | * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> | |
12 | * | |
13 | * This file is subject to the terms and conditions of the GNU General Public | |
14 | * License. See the file "COPYING" in the main directory of this archive | |
15 | * for more details. | |
16 | */ | |
17 | #include <linux/init.h> | |
18 | #include <linux/kernel.h> | |
19 | #include <linux/interrupt.h> | |
aa01666d | 20 | #include <linux/seqlock.h> |
57be2b48 | 21 | #include <linux/clockchips.h> |
aa01666d PM |
22 | #include <asm/timer.h> |
23 | #include <asm/rtc.h> | |
24 | #include <asm/io.h> | |
25 | #include <asm/irq.h> | |
26 | #include <asm/clock.h> | |
27 | ||
28 | #define TMU_TOCR_INIT 0x00 | |
57be2b48 | 29 | #define TMU_TCR_INIT 0x0020 |
aa01666d | 30 | |
57be2b48 PM |
31 | static int tmu_timer_start(void) |
32 | { | |
2b1bd1ac | 33 | ctrl_outb(ctrl_inb(TMU_012_TSTR) | 0x3, TMU_012_TSTR); |
57be2b48 PM |
34 | return 0; |
35 | } | |
aa01666d | 36 | |
57be2b48 | 37 | static void tmu0_timer_set_interval(unsigned long interval, unsigned int reload) |
aa01666d | 38 | { |
57be2b48 | 39 | ctrl_outl(interval, TMU0_TCNT); |
aa01666d PM |
40 | |
41 | /* | |
57be2b48 PM |
42 | * TCNT reloads from TCOR on underflow, clear it if we don't |
43 | * intend to auto-reload | |
aa01666d | 44 | */ |
57be2b48 PM |
45 | if (reload) |
46 | ctrl_outl(interval, TMU0_TCOR); | |
47 | else | |
48 | ctrl_outl(0, TMU0_TCOR); | |
aa01666d | 49 | |
57be2b48 PM |
50 | tmu_timer_start(); |
51 | } | |
aa01666d | 52 | |
57be2b48 PM |
53 | static int tmu_timer_stop(void) |
54 | { | |
2b1bd1ac | 55 | ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~0x3, TMU_012_TSTR); |
57be2b48 PM |
56 | return 0; |
57 | } | |
aa01666d | 58 | |
57be2b48 PM |
59 | static cycle_t tmu_timer_read(void) |
60 | { | |
61 | return ~ctrl_inl(TMU1_TCNT); | |
62 | } | |
63 | ||
/*
 * clockevents next-event hook: arm TMU0 to underflow in @cycles ticks.
 * The reload flag is passed so TCOR mirrors the interval as well.
 */
static int tmu_set_next_event(unsigned long cycles,
			      struct clock_event_device *evt)
{
	tmu0_timer_set_interval(cycles, 1);
	return 0;
}
57be2b48 PM |
71 | static void tmu_set_mode(enum clock_event_mode mode, |
72 | struct clock_event_device *evt) | |
73 | { | |
74 | switch (mode) { | |
75 | case CLOCK_EVT_MODE_PERIODIC: | |
76 | ctrl_outl(ctrl_inl(TMU0_TCNT), TMU0_TCOR); | |
77 | break; | |
78 | case CLOCK_EVT_MODE_ONESHOT: | |
79 | ctrl_outl(0, TMU0_TCOR); | |
80 | break; | |
81 | case CLOCK_EVT_MODE_UNUSED: | |
82 | case CLOCK_EVT_MODE_SHUTDOWN: | |
18de5bc4 | 83 | case CLOCK_EVT_MODE_RESUME: |
57be2b48 PM |
84 | break; |
85 | } | |
aa01666d PM |
86 | } |
87 | ||
57be2b48 PM |
88 | static struct clock_event_device tmu0_clockevent = { |
89 | .name = "tmu0", | |
90 | .shift = 32, | |
91 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | |
92 | .set_mode = tmu_set_mode, | |
93 | .set_next_event = tmu_set_next_event, | |
94 | }; | |
95 | ||
35f3c518 | 96 | static irqreturn_t tmu_timer_interrupt(int irq, void *dummy) |
aa01666d | 97 | { |
57be2b48 | 98 | struct clock_event_device *evt = &tmu0_clockevent; |
aa01666d PM |
99 | unsigned long timer_status; |
100 | ||
101 | /* Clear UNF bit */ | |
102 | timer_status = ctrl_inw(TMU0_TCR); | |
103 | timer_status &= ~0x100; | |
104 | ctrl_outw(timer_status, TMU0_TCR); | |
105 | ||
57be2b48 | 106 | evt->event_handler(evt); |
aa01666d PM |
107 | |
108 | return IRQ_HANDLED; | |
109 | } | |
110 | ||
57be2b48 PM |
111 | static struct irqaction tmu0_irq = { |
112 | .name = "periodic timer", | |
aa01666d | 113 | .handler = tmu_timer_interrupt, |
e9485bae | 114 | .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, |
aa01666d PM |
115 | .mask = CPU_MASK_NONE, |
116 | }; | |
117 | ||
57be2b48 | 118 | static void tmu0_clk_init(struct clk *clk) |
aa01666d | 119 | { |
57be2b48 PM |
120 | u8 divisor = TMU_TCR_INIT & 0x7; |
121 | ctrl_outw(TMU_TCR_INIT, TMU0_TCR); | |
aa01666d PM |
122 | clk->rate = clk->parent->rate / (4 << (divisor << 1)); |
123 | } | |
124 | ||
57be2b48 | 125 | static void tmu0_clk_recalc(struct clk *clk) |
aa01666d PM |
126 | { |
127 | u8 divisor = ctrl_inw(TMU0_TCR) & 0x7; | |
128 | clk->rate = clk->parent->rate / (4 << (divisor << 1)); | |
129 | } | |
130 | ||
57be2b48 PM |
131 | static struct clk_ops tmu0_clk_ops = { |
132 | .init = tmu0_clk_init, | |
133 | .recalc = tmu0_clk_recalc, | |
aa01666d PM |
134 | }; |
135 | ||
136 | static struct clk tmu0_clk = { | |
137 | .name = "tmu0_clk", | |
57be2b48 | 138 | .ops = &tmu0_clk_ops, |
aa01666d PM |
139 | }; |
140 | ||
57be2b48 | 141 | static void tmu1_clk_init(struct clk *clk) |
3aa770e7 | 142 | { |
57be2b48 PM |
143 | u8 divisor = TMU_TCR_INIT & 0x7; |
144 | ctrl_outw(divisor, TMU1_TCR); | |
145 | clk->rate = clk->parent->rate / (4 << (divisor << 1)); | |
3aa770e7 AS |
146 | } |
147 | ||
57be2b48 | 148 | static void tmu1_clk_recalc(struct clk *clk) |
3aa770e7 | 149 | { |
57be2b48 PM |
150 | u8 divisor = ctrl_inw(TMU1_TCR) & 0x7; |
151 | clk->rate = clk->parent->rate / (4 << (divisor << 1)); | |
3aa770e7 AS |
152 | } |
153 | ||
57be2b48 PM |
154 | static struct clk_ops tmu1_clk_ops = { |
155 | .init = tmu1_clk_init, | |
156 | .recalc = tmu1_clk_recalc, | |
157 | }; | |
158 | ||
159 | static struct clk tmu1_clk = { | |
160 | .name = "tmu1_clk", | |
161 | .ops = &tmu1_clk_ops, | |
162 | }; | |
163 | ||
aa01666d PM |
164 | static int tmu_timer_init(void) |
165 | { | |
166 | unsigned long interval; | |
57be2b48 | 167 | unsigned long frequency; |
aa01666d | 168 | |
57be2b48 | 169 | setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq); |
aa01666d | 170 | |
1d118562 | 171 | tmu0_clk.parent = clk_get(NULL, "module_clk"); |
57be2b48 | 172 | tmu1_clk.parent = clk_get(NULL, "module_clk"); |
aa01666d | 173 | |
3aa770e7 | 174 | tmu_timer_stop(); |
57be2b48 | 175 | |
3ea6bc3d | 176 | #if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \ |
31a49c4b | 177 | !defined(CONFIG_CPU_SUBTYPE_SH7721) && \ |
3ea6bc3d | 178 | !defined(CONFIG_CPU_SUBTYPE_SH7760) && \ |
2b1bd1ac PM |
179 | !defined(CONFIG_CPU_SUBTYPE_SH7785) && \ |
180 | !defined(CONFIG_CPU_SUBTYPE_SHX3) | |
aa01666d PM |
181 | ctrl_outb(TMU_TOCR_INIT, TMU_TOCR); |
182 | #endif | |
183 | ||
184 | clk_register(&tmu0_clk); | |
57be2b48 | 185 | clk_register(&tmu1_clk); |
aa01666d | 186 | clk_enable(&tmu0_clk); |
57be2b48 | 187 | clk_enable(&tmu1_clk); |
aa01666d | 188 | |
57be2b48 PM |
189 | frequency = clk_get_rate(&tmu0_clk); |
190 | interval = (frequency + HZ / 2) / HZ; | |
aa01666d | 191 | |
57be2b48 PM |
192 | sh_hpt_frequency = clk_get_rate(&tmu1_clk); |
193 | ctrl_outl(~0, TMU1_TCNT); | |
194 | ctrl_outl(~0, TMU1_TCOR); | |
aa01666d | 195 | |
57be2b48 PM |
196 | tmu0_timer_set_interval(interval, 1); |
197 | ||
198 | tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC, | |
199 | tmu0_clockevent.shift); | |
200 | tmu0_clockevent.max_delta_ns = | |
201 | clockevent_delta2ns(-1, &tmu0_clockevent); | |
202 | tmu0_clockevent.min_delta_ns = | |
203 | clockevent_delta2ns(1, &tmu0_clockevent); | |
204 | ||
205 | tmu0_clockevent.cpumask = cpumask_of_cpu(0); | |
206 | ||
207 | clockevents_register_device(&tmu0_clockevent); | |
aa01666d PM |
208 | |
209 | return 0; | |
210 | } | |
211 | ||
4c1cfab1 | 212 | static struct sys_timer_ops tmu_timer_ops = { |
aa01666d | 213 | .init = tmu_timer_init, |
3aa770e7 AS |
214 | .start = tmu_timer_start, |
215 | .stop = tmu_timer_stop, | |
57be2b48 | 216 | .read = tmu_timer_read, |
aa01666d PM |
217 | }; |
218 | ||
219 | struct sys_timer tmu_timer = { | |
220 | .name = "tmu", | |
221 | .ops = &tmu_timer_ops, | |
222 | }; |