Commit | Line | Data |
---|---|---|
9570ef20 MD |
1 | /* |
2 | * SuperH Timer Support - TMU | |
3 | * | |
4 | * Copyright (C) 2009 Magnus Damm | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
16 | * along with this program; if not, write to the Free Software | |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
18 | */ | |
19 | ||
20 | #include <linux/init.h> | |
21 | #include <linux/platform_device.h> | |
22 | #include <linux/spinlock.h> | |
23 | #include <linux/interrupt.h> | |
24 | #include <linux/ioport.h> | |
25 | #include <linux/delay.h> | |
26 | #include <linux/io.h> | |
27 | #include <linux/clk.h> | |
28 | #include <linux/irq.h> | |
29 | #include <linux/err.h> | |
30 | #include <linux/clocksource.h> | |
31 | #include <linux/clockchips.h> | |
46a12f74 | 32 | #include <linux/sh_timer.h> |
5a0e3ad6 | 33 | #include <linux/slab.h> |
7deeab5d | 34 | #include <linux/module.h> |
2ee619f9 | 35 | #include <linux/pm_domain.h> |
eaa49a8c | 36 | #include <linux/pm_runtime.h> |
9570ef20 | 37 | |
struct sh_tmu_device;

/*
 * Per-channel state. One TMU channel drives either a clock event device
 * or a clocksource, selected at registration time (see sh_tmu_register()).
 */
struct sh_tmu_channel {
	struct sh_tmu_device *tmu;	/* back-pointer to the owning TMU block */
	unsigned int index;		/* channel number within the block */

	void __iomem *base;		/* this channel's register window */
	int irq;			/* underflow interrupt */

	unsigned long rate;		/* counter input rate in Hz (clk / 4) */
	unsigned long periodic;		/* reload value for periodic mode */
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;		/* clocksource currently enabled? */
	unsigned int enable_count;	/* nesting count for sh_tmu_enable() */
};
54 | ||
/*
 * One TMU hardware block: the shared start/stop register plus its
 * channels.
 */
struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;		/* block base; the shared TSTR register lives here */
	struct clk *clk;		/* functional clock feeding the counters */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;
};
64 | ||
/* Serializes read-modify-write of the TSTR register shared by all channels. */
static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

/* Register indices used by sh_tmu_read()/sh_tmu_write(). */
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */
71 | ||
/*
 * Read a timer register. TSTR is the 8-bit start/stop register shared by
 * all channels and sits at the block base; channel registers are spaced
 * 4 bytes apart from the channel base, with TCR accessed 16 bits wide and
 * TCOR/TCNT 32 bits wide.
 */
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR)
		return ioread8(ch->tmu->mapbase);

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}
86 | ||
/*
 * Write a timer register, mirroring the access widths and layout used by
 * sh_tmu_read(): 8-bit shared TSTR at the block base, 16-bit TCR and
 * 32-bit TCOR/TCNT at 4-byte strides from the channel base.
 */
static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		iowrite8(value, ch->tmu->mapbase);
		return;
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}
104 | ||
/*
 * Start (start != 0) or stop this channel's counter by setting/clearing
 * its bit in the shared TSTR register. The register is shared by all
 * channels of the block, hence the locked read-modify-write.
 */
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}
121 | ||
/*
 * Power up the channel hardware: enable the functional clock, program a
 * maximum timeout with interrupts off, cache the counter rate (parent
 * clock / 4 per the TCR value written below) and start counting.
 * Returns 0 on success or the clk_enable() error code.
 */
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, 0x0000);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}
150 | ||
de2d12c7 | 151 | static int sh_tmu_enable(struct sh_tmu_channel *ch) |
61a53bfa | 152 | { |
de2d12c7 | 153 | if (ch->enable_count++ > 0) |
61a53bfa RW |
154 | return 0; |
155 | ||
de2d12c7 LP |
156 | pm_runtime_get_sync(&ch->tmu->pdev->dev); |
157 | dev_pm_syscore_device(&ch->tmu->pdev->dev, true); | |
61a53bfa | 158 | |
de2d12c7 | 159 | return __sh_tmu_enable(ch); |
61a53bfa RW |
160 | } |
161 | ||
/*
 * Power down the channel hardware: stop the counter, mask its interrupt
 * by clearing TCR, and gate the functional clock.
 */
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, 0x0000);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}
173 | ||
/*
 * Drop one enable reference. Only the last user actually stops the
 * hardware and releases the runtime PM reference taken in
 * sh_tmu_enable(). An unbalanced call triggers a WARN and is ignored.
 */
static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}
187 | ||
/*
 * Program a new timeout of 'delta' counter ticks. The counter is stopped
 * first, any pending underflow is acknowledged by reading TCR, underflow
 * interrupts are enabled, and the down-counter is reloaded before the
 * channel is restarted. In periodic mode TCOR holds the reload value so
 * the hardware re-arms itself; in oneshot mode TCOR is parked at the
 * maximum.
 */
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, 0x0020);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}
211 | ||
/*
 * Underflow interrupt handler. In oneshot mode the event has fired, so the
 * interrupt is disabled entirely; in periodic mode it is only acknowledged
 * by rewriting TCR with the interrupt-enable bit still set. The clockevent
 * core handler is then invoked.
 */
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(ch, TCR, 0x0000);
	else
		sh_tmu_write(ch, TCR, 0x0020);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}
226 | ||
de2d12c7 | 227 | static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs) |
9570ef20 | 228 | { |
de2d12c7 | 229 | return container_of(cs, struct sh_tmu_channel, cs); |
9570ef20 MD |
230 | } |
231 | ||
/*
 * Clocksource read callback. TCNT is a down-counter, so return its one's
 * complement (XOR with 0xffffffff) to present a monotonically increasing
 * 32-bit value to the timekeeping core.
 */
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}
238 | ||
/*
 * Clocksource enable callback: power up the channel and, now that the
 * counter rate is known (it is only available once the clock is enabled),
 * replace the dummy registration frequency with the real one.
 */
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	/* double enable would unbalance the refcount; warn and bail out */
	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}

	return ret;
}
255 | ||
/*
 * Clocksource disable callback: counterpart of
 * sh_tmu_clocksource_enable(). Warns on an unbalanced disable.
 */
static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}
266 | ||
/*
 * System suspend hook: stop the hardware and power off the PM domain, but
 * only when this drops the last enable reference. cs_enabled is left set
 * so that sh_tmu_clocksource_resume() knows to restart the channel.
 */
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}
279 | ||
/*
 * System resume hook: mirror of sh_tmu_clocksource_suspend(). The first
 * re-enable powers the PM domain back on and reprograms the hardware.
 */
static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}
292 | ||
/*
 * Register the channel as a 32-bit continuous clocksource. The real
 * frequency is only known once the clock is enabled, so registration uses
 * a dummy 1 Hz rate that the ->enable() callback later corrects.
 * Always returns 0.
 */
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name, unsigned long rating)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = rating;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}
315 | ||
de2d12c7 | 316 | static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced) |
9570ef20 | 317 | { |
de2d12c7 | 318 | return container_of(ced, struct sh_tmu_channel, ced); |
9570ef20 MD |
319 | } |
320 | ||
/*
 * Power up the channel and (re)configure the clock event device with the
 * counter rate, which is only known after sh_tmu_enable(). For periodic
 * mode the reload value is one timer tick per jiffy, rounded to nearest.
 */
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	clockevents_config(ced, ch->rate);

	if (periodic) {
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}
334 | ||
/*
 * Clock event mode switch. The channel is first torn down if the previous
 * mode had it running, then restarted in the requested mode. For UNUSED
 * the channel is disabled only if the first switch did not already do so;
 * SHUTDOWN intentionally does nothing further.
 */
static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}
372 | ||
/*
 * Program the next oneshot event, 'delta' counter ticks from now.
 * Only valid in oneshot mode. Always returns 0.
 */
static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}
384 | ||
eaa49a8c RW |
385 | static void sh_tmu_clock_event_suspend(struct clock_event_device *ced) |
386 | { | |
de2d12c7 | 387 | pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev); |
eaa49a8c RW |
388 | } |
389 | ||
390 | static void sh_tmu_clock_event_resume(struct clock_event_device *ced) | |
391 | { | |
de2d12c7 | 392 | pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev); |
eaa49a8c RW |
393 | } |
394 | ||
/*
 * Register the channel as a periodic + oneshot clock event device and
 * hook up its underflow interrupt. Registration uses a dummy 1 Hz rate;
 * the real rate is applied by clockevents_config() when the channel is
 * started in sh_tmu_clock_event_start().
 */
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name, unsigned long rating)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		/*
		 * NOTE(review): the clock event device remains registered
		 * even when the interrupt cannot be requested, so events
		 * would never fire in that case — confirm whether this is
		 * acceptable for this platform.
		 */
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}
425 | ||
/*
 * Register the channel with the kernel timekeeping layers. A non-zero
 * clockevent rating takes precedence; otherwise a non-zero clocksource
 * rating registers a clocksource. Always returns 0.
 */
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   unsigned long clockevent_rating,
			   unsigned long clocksource_rating)
{
	if (clockevent_rating) {
		sh_tmu_register_clockevent(ch, name, clockevent_rating);
		return 0;
	}

	if (clocksource_rating)
		sh_tmu_register_clocksource(ch, name, clocksource_rating);

	return 0;
}
437 | ||
/*
 * Initialize one channel: derive its index from the platform-provided
 * register offset, fetch its interrupt number, and register it with the
 * clockevent/clocksource layers according to the platform ratings.
 * Returns 0 on success or a negative error code.
 */
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch,
				struct sh_tmu_device *tmu)
{
	struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

	ch->tmu = tmu;

	/*
	 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps channel
	 * registers blocks at base + 2 + 12 * index, while all other variants
	 * map them at base + 4 + 12 * index. We can compute the index by just
	 * dividing by 12, the 2 bytes or 4 bytes offset being hidden by the
	 * integer division.
	 */
	ch->index = cfg->channel_offset / 12;

	ch->irq = platform_get_irq(tmu->pdev, 0);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       cfg->clockevent_rating,
			       cfg->clocksource_rating);
}
468 | ||
/*
 * Map the device, acquire and prepare its clock, and set up its (single)
 * channel. On failure every resource acquired so far is released through
 * the goto cleanup chain. Returns 0 on success or a negative error code.
 */
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	void __iomem *base;
	int ret;
	ret = -ENXIO;

	tmu->pdev = pdev;

	if (!cfg) {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		goto err0;
	}

	platform_set_drvdata(pdev, tmu);

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	/*
	 * Map memory, let base point to our channel and mapbase to the
	 * start/stop shared register.
	 */
	base = ioremap_nocache(res->start, resource_size(res));
	if (base == NULL) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* the resource points at the channel; the block base (TSTR) sits
	 * channel_offset bytes before it */
	tmu->mapbase = base - cfg->channel_offset;

	/* get hold of clock */
	tmu->clk = clk_get(&tmu->pdev->dev, "tmu_fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(tmu->clk);
		goto err1;
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err2;

	tmu->channels = kzalloc(sizeof(*tmu->channels), GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err3;
	}

	tmu->num_channels = 1;

	tmu->channels[0].base = base;

	ret = sh_tmu_channel_setup(&tmu->channels[0], tmu);
	if (ret < 0)
		goto err3;

	return 0;

err3:
	kfree(tmu->channels);
	clk_unprepare(tmu->clk);
err2:
	clk_put(tmu->clk);
err1:
	iounmap(base);
err0:
	return ret;
}
542 | ||
/*
 * Platform driver probe. The device may already have been set up during
 * the early platform ("earlytimer") pass, in which case drvdata is set
 * and only the runtime PM state needs to be adjusted here.
 */
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	/* runtime PM is only set up for the normal (non-early) probe */
	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		/* drvdata already set: the early probe did the work */
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	/* timers used for timekeeping must be usable from atomic context */
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
582 | ||
/* Removal is refused: registered clockevents/clocksources cannot go away. */
static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
587 | ||
/* Platform driver binding to "sh_tmu" devices. */
static struct platform_driver sh_tmu_device_driver = {
	.probe = sh_tmu_probe,
	.remove = sh_tmu_remove,
	.driver = {
		.name = "sh_tmu",
	}
};
595 | ||
596 | static int __init sh_tmu_init(void) | |
597 | { | |
598 | return platform_driver_register(&sh_tmu_device_driver); | |
599 | } | |
600 | ||
601 | static void __exit sh_tmu_exit(void) | |
602 | { | |
603 | platform_driver_unregister(&sh_tmu_device_driver); | |
604 | } | |
605 | ||
/*
 * Register for the early platform pass under the "earlytimer" class so the
 * timer can be probed before the driver core is fully up; the normal
 * registration happens again at subsys_initcall time.
 */
early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");