Commit | Line | Data |
---|---|---|
9570ef20 MD |
1 | /* |
2 | * SuperH Timer Support - TMU | |
3 | * | |
4 | * Copyright (C) 2009 Magnus Damm | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
16 | * along with this program; if not, write to the Free Software | |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
18 | */ | |
19 | ||
20 | #include <linux/init.h> | |
21 | #include <linux/platform_device.h> | |
22 | #include <linux/spinlock.h> | |
23 | #include <linux/interrupt.h> | |
24 | #include <linux/ioport.h> | |
25 | #include <linux/delay.h> | |
26 | #include <linux/io.h> | |
27 | #include <linux/clk.h> | |
28 | #include <linux/irq.h> | |
29 | #include <linux/err.h> | |
30 | #include <linux/clocksource.h> | |
31 | #include <linux/clockchips.h> | |
46a12f74 | 32 | #include <linux/sh_timer.h> |
5a0e3ad6 | 33 | #include <linux/slab.h> |
7deeab5d | 34 | #include <linux/module.h> |
2ee619f9 | 35 | #include <linux/pm_domain.h> |
eaa49a8c | 36 | #include <linux/pm_runtime.h> |
9570ef20 | 37 | |
struct sh_tmu_device;

/*
 * Per-channel state for one TMU timer channel. A channel can back either
 * a clockevent device or a clocksource (see sh_tmu_register()).
 */
struct sh_tmu_channel {
	struct sh_tmu_device *tmu;	/* owning TMU block */
	unsigned int index;		/* channel number within the block */

	void __iomem *base;		/* channel register window */
	int irq;			/* channel interrupt */

	unsigned long rate;		/* counting rate (input clock / 4) */
	unsigned long periodic;		/* reload value for periodic mode */
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;		/* clocksource currently enabled */
	unsigned int enable_count;	/* nested enable refcount */
};
54 | ||
/*
 * Per-device state: one memory-mapped TMU block. This driver handles a
 * single channel per platform device.
 */
struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;	/* block base; the shared TSTR register lives here */
	struct clk *clk;	/* functional clock ("tmu_fck") */

	struct sh_tmu_channel channel;
};
63 | ||
/* Serializes read-modify-write of TSTR, which is shared by all channels. */
static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

/* Register indices passed to sh_tmu_read()/sh_tmu_write(). */
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */
70 | ||
de2d12c7 | 71 | static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr) |
9570ef20 | 72 | { |
9570ef20 MD |
73 | unsigned long offs; |
74 | ||
75 | if (reg_nr == TSTR) | |
de693461 | 76 | return ioread8(ch->tmu->mapbase); |
9570ef20 MD |
77 | |
78 | offs = reg_nr << 2; | |
79 | ||
80 | if (reg_nr == TCR) | |
de693461 | 81 | return ioread16(ch->base + offs); |
9570ef20 | 82 | else |
de693461 | 83 | return ioread32(ch->base + offs); |
9570ef20 MD |
84 | } |
85 | ||
de2d12c7 | 86 | static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr, |
9570ef20 MD |
87 | unsigned long value) |
88 | { | |
9570ef20 MD |
89 | unsigned long offs; |
90 | ||
91 | if (reg_nr == TSTR) { | |
de693461 | 92 | iowrite8(value, ch->tmu->mapbase); |
9570ef20 MD |
93 | return; |
94 | } | |
95 | ||
96 | offs = reg_nr << 2; | |
97 | ||
98 | if (reg_nr == TCR) | |
de693461 | 99 | iowrite16(value, ch->base + offs); |
9570ef20 | 100 | else |
de693461 | 101 | iowrite32(value, ch->base + offs); |
9570ef20 MD |
102 | } |
103 | ||
de2d12c7 | 104 | static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start) |
9570ef20 | 105 | { |
9570ef20 MD |
106 | unsigned long flags, value; |
107 | ||
108 | /* start stop register shared by multiple timer channels */ | |
c2225a57 | 109 | raw_spin_lock_irqsave(&sh_tmu_lock, flags); |
de2d12c7 | 110 | value = sh_tmu_read(ch, TSTR); |
9570ef20 MD |
111 | |
112 | if (start) | |
fe68eb80 | 113 | value |= 1 << ch->index; |
9570ef20 | 114 | else |
fe68eb80 | 115 | value &= ~(1 << ch->index); |
9570ef20 | 116 | |
de2d12c7 | 117 | sh_tmu_write(ch, TSTR, value); |
c2225a57 | 118 | raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); |
9570ef20 MD |
119 | } |
120 | ||
/*
 * Power up a channel and start it free-running: clock on, counter at
 * maximum timeout, input clock / 4, interrupts masked, then set the
 * TSTR start bit. The programming order below matters — the channel
 * must be stopped while TCOR/TCNT/TCR are written.
 *
 * Returns 0 on success or the clk_enable() error code.
 */
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, 0x0000);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}
149 | ||
de2d12c7 | 150 | static int sh_tmu_enable(struct sh_tmu_channel *ch) |
61a53bfa | 151 | { |
de2d12c7 | 152 | if (ch->enable_count++ > 0) |
61a53bfa RW |
153 | return 0; |
154 | ||
de2d12c7 LP |
155 | pm_runtime_get_sync(&ch->tmu->pdev->dev); |
156 | dev_pm_syscore_device(&ch->tmu->pdev->dev, true); | |
61a53bfa | 157 | |
de2d12c7 | 158 | return __sh_tmu_enable(ch); |
61a53bfa RW |
159 | } |
160 | ||
/*
 * Stop a channel and power it down: clear its TSTR bit, mask its
 * interrupts, then gate the functional clock. Counterpart of
 * __sh_tmu_enable().
 */
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, 0x0000);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}
172 | ||
de2d12c7 | 173 | static void sh_tmu_disable(struct sh_tmu_channel *ch) |
61a53bfa | 174 | { |
de2d12c7 | 175 | if (WARN_ON(ch->enable_count == 0)) |
61a53bfa RW |
176 | return; |
177 | ||
de2d12c7 | 178 | if (--ch->enable_count > 0) |
61a53bfa RW |
179 | return; |
180 | ||
de2d12c7 | 181 | __sh_tmu_disable(ch); |
61a53bfa | 182 | |
de2d12c7 LP |
183 | dev_pm_syscore_device(&ch->tmu->pdev->dev, false); |
184 | pm_runtime_put(&ch->tmu->pdev->dev); | |
61a53bfa RW |
185 | } |
186 | ||
/*
 * Program the channel to expire in 'delta' counter cycles. The sequence
 * is order-critical: the channel is stopped, the pending underflow is
 * acknowledged by reading TCR, interrupts are re-enabled (0x0020 sets
 * the underflow interrupt enable bit — see the irq-off write of 0x0000
 * in __sh_tmu_enable()), the reload and count registers are written,
 * and only then is the channel restarted.
 */
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, 0x0020);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}
210 | ||
211 | static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id) | |
212 | { | |
de2d12c7 | 213 | struct sh_tmu_channel *ch = dev_id; |
9570ef20 MD |
214 | |
215 | /* disable or acknowledge interrupt */ | |
de2d12c7 LP |
216 | if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) |
217 | sh_tmu_write(ch, TCR, 0x0000); | |
9570ef20 | 218 | else |
de2d12c7 | 219 | sh_tmu_write(ch, TCR, 0x0020); |
9570ef20 MD |
220 | |
221 | /* notify clockevent layer */ | |
de2d12c7 | 222 | ch->ced.event_handler(&ch->ced); |
9570ef20 MD |
223 | return IRQ_HANDLED; |
224 | } | |
225 | ||
/* Map a clocksource pointer back to its containing channel. */
static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
230 | ||
231 | static cycle_t sh_tmu_clocksource_read(struct clocksource *cs) | |
232 | { | |
de2d12c7 | 233 | struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); |
9570ef20 | 234 | |
de2d12c7 | 235 | return sh_tmu_read(ch, TCNT) ^ 0xffffffff; |
9570ef20 MD |
236 | } |
237 | ||
238 | static int sh_tmu_clocksource_enable(struct clocksource *cs) | |
239 | { | |
de2d12c7 | 240 | struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); |
0aeac458 | 241 | int ret; |
9570ef20 | 242 | |
de2d12c7 | 243 | if (WARN_ON(ch->cs_enabled)) |
61a53bfa RW |
244 | return 0; |
245 | ||
de2d12c7 | 246 | ret = sh_tmu_enable(ch); |
eaa49a8c | 247 | if (!ret) { |
de2d12c7 LP |
248 | __clocksource_updatefreq_hz(cs, ch->rate); |
249 | ch->cs_enabled = true; | |
eaa49a8c | 250 | } |
61a53bfa | 251 | |
0aeac458 | 252 | return ret; |
9570ef20 MD |
253 | } |
254 | ||
255 | static void sh_tmu_clocksource_disable(struct clocksource *cs) | |
256 | { | |
de2d12c7 | 257 | struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); |
eaa49a8c | 258 | |
de2d12c7 | 259 | if (WARN_ON(!ch->cs_enabled)) |
61a53bfa | 260 | return; |
eaa49a8c | 261 | |
de2d12c7 LP |
262 | sh_tmu_disable(ch); |
263 | ch->cs_enabled = false; | |
eaa49a8c RW |
264 | } |
265 | ||
266 | static void sh_tmu_clocksource_suspend(struct clocksource *cs) | |
267 | { | |
de2d12c7 | 268 | struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); |
eaa49a8c | 269 | |
de2d12c7 | 270 | if (!ch->cs_enabled) |
61a53bfa | 271 | return; |
eaa49a8c | 272 | |
de2d12c7 LP |
273 | if (--ch->enable_count == 0) { |
274 | __sh_tmu_disable(ch); | |
275 | pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev); | |
61a53bfa | 276 | } |
eaa49a8c RW |
277 | } |
278 | ||
279 | static void sh_tmu_clocksource_resume(struct clocksource *cs) | |
280 | { | |
de2d12c7 | 281 | struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); |
eaa49a8c | 282 | |
de2d12c7 | 283 | if (!ch->cs_enabled) |
61a53bfa RW |
284 | return; |
285 | ||
de2d12c7 LP |
286 | if (ch->enable_count++ == 0) { |
287 | pm_genpd_syscore_poweron(&ch->tmu->pdev->dev); | |
288 | __sh_tmu_enable(ch); | |
61a53bfa | 289 | } |
9570ef20 MD |
290 | } |
291 | ||
de2d12c7 | 292 | static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch, |
84876d05 | 293 | const char *name, unsigned long rating) |
9570ef20 | 294 | { |
de2d12c7 | 295 | struct clocksource *cs = &ch->cs; |
9570ef20 MD |
296 | |
297 | cs->name = name; | |
298 | cs->rating = rating; | |
299 | cs->read = sh_tmu_clocksource_read; | |
300 | cs->enable = sh_tmu_clocksource_enable; | |
301 | cs->disable = sh_tmu_clocksource_disable; | |
eaa49a8c RW |
302 | cs->suspend = sh_tmu_clocksource_suspend; |
303 | cs->resume = sh_tmu_clocksource_resume; | |
9570ef20 MD |
304 | cs->mask = CLOCKSOURCE_MASK(32); |
305 | cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; | |
66f49121 | 306 | |
fe68eb80 LP |
307 | dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n", |
308 | ch->index); | |
0aeac458 MD |
309 | |
310 | /* Register with dummy 1 Hz value, gets updated in ->enable() */ | |
311 | clocksource_register_hz(cs, 1); | |
9570ef20 MD |
312 | return 0; |
313 | } | |
314 | ||
/* Map a clock_event_device pointer back to its containing channel. */
static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
319 | ||
de2d12c7 | 320 | static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic) |
9570ef20 | 321 | { |
de2d12c7 | 322 | struct clock_event_device *ced = &ch->ced; |
9570ef20 | 323 | |
de2d12c7 | 324 | sh_tmu_enable(ch); |
9570ef20 | 325 | |
de2d12c7 | 326 | clockevents_config(ced, ch->rate); |
9570ef20 MD |
327 | |
328 | if (periodic) { | |
de2d12c7 LP |
329 | ch->periodic = (ch->rate + HZ/2) / HZ; |
330 | sh_tmu_set_next(ch, ch->periodic, 1); | |
9570ef20 MD |
331 | } |
332 | } | |
333 | ||
/*
 * Clockevent mode switch callback. First tears down the previous mode
 * (if it had the hardware enabled), then configures the new one.
 * 'disabled' avoids a double sh_tmu_disable() when going from an
 * active mode straight to UNUSED.
 */
static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}
371 | ||
372 | static int sh_tmu_clock_event_next(unsigned long delta, | |
373 | struct clock_event_device *ced) | |
374 | { | |
de2d12c7 | 375 | struct sh_tmu_channel *ch = ced_to_sh_tmu(ced); |
9570ef20 MD |
376 | |
377 | BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); | |
378 | ||
379 | /* program new delta value */ | |
de2d12c7 | 380 | sh_tmu_set_next(ch, delta, 0); |
9570ef20 MD |
381 | return 0; |
382 | } | |
383 | ||
eaa49a8c RW |
384 | static void sh_tmu_clock_event_suspend(struct clock_event_device *ced) |
385 | { | |
de2d12c7 | 386 | pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev); |
eaa49a8c RW |
387 | } |
388 | ||
389 | static void sh_tmu_clock_event_resume(struct clock_event_device *ced) | |
390 | { | |
de2d12c7 | 391 | pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev); |
eaa49a8c RW |
392 | } |
393 | ||
/*
 * Set up and register the channel as a periodic/oneshot clockevent
 * device, then request its interrupt.
 *
 * NOTE(review): the function returns void, so a request_irq() failure
 * can only be logged — the clockevent device stays registered with no
 * interrupt behind it. Presumably acceptable for a boot-critical timer
 * that cannot be unregistered anyway (see sh_tmu_remove()); confirm
 * before reusing this pattern.
 */
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name, unsigned long rating)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	memset(ced, 0, sizeof(*ced));

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}
426 | ||
/*
 * Register the channel for whichever role its platform data rated:
 * clockevents take precedence over clocksource use. Always returns 0.
 */
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   unsigned long clockevent_rating,
			   unsigned long clocksource_rating)
{
	if (clockevent_rating) {
		sh_tmu_register_clockevent(ch, name, clockevent_rating);
		return 0;
	}

	if (clocksource_rating)
		sh_tmu_register_clocksource(ch, name, clocksource_rating);

	return 0;
}
438 | ||
a94ddaa6 LP |
/*
 * Initialize a channel from platform data: derive the channel index,
 * fetch its interrupt and register it per the configured ratings.
 * Returns 0 on success or a negative error code.
 */
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch,
				struct sh_tmu_device *tmu)
{
	struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

	memset(ch, 0, sizeof(*ch));
	ch->tmu = tmu;

	/*
	 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps channel
	 * registers blocks at base + 2 + 12 * index, while all other variants
	 * map them at base + 4 + 12 * index. We can compute the index by just
	 * dividing by 12, the 2 bytes or 4 bytes offset being hidden by the
	 * integer division.
	 */
	ch->index = cfg->channel_offset / 12;

	ch->irq = platform_get_irq(tmu->pdev, 0);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       cfg->clockevent_rating,
			       cfg->clocksource_rating);
}
470 | ||
/*
 * One-time device setup: validate platform data, map the register
 * window, acquire and prepare the functional clock, then set up the
 * channel. Uses the classic goto cleanup chain — each label unwinds
 * exactly the resources acquired before the corresponding failure.
 * Returns 0 on success or a negative error code.
 */
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int ret;
	ret = -ENXIO;

	memset(tmu, 0, sizeof(*tmu));
	tmu->pdev = pdev;

	if (!cfg) {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		goto err0;
	}

	platform_set_drvdata(pdev, tmu);

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	/*
	 * Map memory, let channel.base point to our channel and mapbase to the
	 * start/stop shared register.
	 */
	tmu->channel.base = ioremap_nocache(res->start, resource_size(res));
	if (tmu->channel.base == NULL) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* the memory resource covers only the channel; TSTR sits below it */
	tmu->mapbase = tmu->channel.base - cfg->channel_offset;

	/* get hold of clock */
	tmu->clk = clk_get(&tmu->pdev->dev, "tmu_fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(tmu->clk);
		goto err1;
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err2;

	ret = sh_tmu_channel_setup(&tmu->channel, tmu);
	if (ret < 0)
		goto err3;

	return 0;

err3:
	clk_unprepare(tmu->clk);
err2:
	clk_put(tmu->clk);
err1:
	iounmap(tmu->channel.base);
err0:
	return ret;
}
533 | ||
1850514b | 534 | static int sh_tmu_probe(struct platform_device *pdev) |
9570ef20 | 535 | { |
0a72aa39 | 536 | struct sh_tmu_device *tmu = platform_get_drvdata(pdev); |
61a53bfa | 537 | struct sh_timer_config *cfg = pdev->dev.platform_data; |
9570ef20 MD |
538 | int ret; |
539 | ||
eaa49a8c | 540 | if (!is_early_platform_device(pdev)) { |
61a53bfa RW |
541 | pm_runtime_set_active(&pdev->dev); |
542 | pm_runtime_enable(&pdev->dev); | |
eaa49a8c | 543 | } |
2ee619f9 | 544 | |
0a72aa39 | 545 | if (tmu) { |
214a607a | 546 | dev_info(&pdev->dev, "kept as earlytimer\n"); |
61a53bfa | 547 | goto out; |
9570ef20 MD |
548 | } |
549 | ||
0a72aa39 LP |
550 | tmu = kmalloc(sizeof(*tmu), GFP_KERNEL); |
551 | if (tmu == NULL) { | |
9570ef20 MD |
552 | dev_err(&pdev->dev, "failed to allocate driver data\n"); |
553 | return -ENOMEM; | |
554 | } | |
555 | ||
0a72aa39 | 556 | ret = sh_tmu_setup(tmu, pdev); |
9570ef20 | 557 | if (ret) { |
0a72aa39 | 558 | kfree(tmu); |
61a53bfa RW |
559 | pm_runtime_idle(&pdev->dev); |
560 | return ret; | |
9570ef20 | 561 | } |
61a53bfa RW |
562 | if (is_early_platform_device(pdev)) |
563 | return 0; | |
564 | ||
565 | out: | |
566 | if (cfg->clockevent_rating || cfg->clocksource_rating) | |
567 | pm_runtime_irq_safe(&pdev->dev); | |
568 | else | |
569 | pm_runtime_idle(&pdev->dev); | |
570 | ||
571 | return 0; | |
9570ef20 MD |
572 | } |
573 | ||
/*
 * Removal is refused: registered clockevent/clocksource devices cannot
 * be unregistered, so the driver must stay bound.
 */
static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
578 | ||
static struct platform_driver sh_tmu_device_driver = {
	.probe = sh_tmu_probe,
	.remove = sh_tmu_remove,
	.driver = {
		.name = "sh_tmu",
	}
};

/* Normal registration path for non-early devices. */
static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

/* Also register as an early platform driver so the timer works at boot. */
early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");