clocksource: sh_tmu: Rename clock to "fck" in the non-legacy case
drivers/clocksource/sh_tmu.c
/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

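/*
 * TMU variants handled by this driver: SH_TMU_LEGACY uses the old
 * one-platform-device-per-channel binding, while SH_TMU_SH3 and SH_TMU only
 * differ in the location of the shared TSTR register and of the channel
 * register blocks (see sh_tmu_read() and sh_tmu_channel_setup()).
 */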
enum sh_tmu_model {
	SH_TMU_LEGACY,
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

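/*
 * Per-channel state: each hardware channel is registered as either a clock
 * event device or a clocksource (see sh_tmu_register()).
 */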
struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	enum sh_tmu_model model;

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

#define TCR_UNF (1 << 8)
#define TCR_UNIE (1 << 5)
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)

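/*
 * TSTR is an 8-bit register shared by all channels; its offset depends on the
 * TMU variant. The per-channel registers are laid out at 4-byte strides, with
 * TCR accessed as a 16-bit register and TCOR/TCNT as 32-bit registers.
 */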
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_LEGACY:
			return ioread8(ch->tmu->mapbase);
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_LEGACY:
			return iowrite8(value, ch->tmu->mapbase);
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}

static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}

static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}

static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

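	/* TCNT counts down; invert it so the returned value counts up. */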
	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}

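/*
 * Suspend drops the clocksource's reference on the channel and only stops the
 * hardware when no other user (the clock event device) still holds one;
 * resume takes the reference back and restarts the channel if needed.
 */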
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}

static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	clockevents_config(ced, ch->rate);

	if (periodic) {
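		/* one jiffy worth of counter ticks, rounded to the nearest tick */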
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

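	/*
	 * Register with a dummy frequency of 1 Hz; the real rate is set
	 * through clockevents_config() when the channel is started. The
	 * minimum delta is 0x300 counter ticks, the maximum is the full
	 * 32-bit counter range.
	 */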
	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}

static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;

	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

		/*
		 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
		 * channel register blocks at base + 2 + 12 * index, while all
		 * other variants map them at base + 4 + 12 * index. We can
		 * compute the index by just dividing by 12, the 2 bytes or 4
		 * bytes offset being hidden by the integer division.
		 */
		ch->index = cfg->channel_offset / 12;
		ch->base = tmu->mapbase + cfg->channel_offset;
	} else {
		ch->index = index;

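		/*
		 * Non-legacy models: channel register blocks are 12 bytes
		 * apart, starting at base + 4 on SH3 and at base + 8
		 * otherwise.
		 */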
		if (tmu->model == SH_TMU_SH3)
			ch->base = tmu->mapbase + 4 + ch->index * 12;
		else
			ch->base = tmu->mapbase + 8 + ch->index * 12;
	}

	ch->irq = platform_get_irq(tmu->pdev, ch->index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	/*
	 * In legacy platform device configuration (with one device per channel)
	 * the resource points to the channel base address.
	 */
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
		tmu->mapbase -= cfg->channel_offset;
	}

	return 0;
}

static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu)
{
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
		tmu->mapbase += cfg->channel_offset;
	}

	iounmap(tmu->mapbase);
}

static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	const struct platform_device_id *id = pdev->id_entry;
	unsigned int i;
	int ret;

	if (!cfg) {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	tmu->pdev = pdev;
	tmu->model = id->driver_data;

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev,
			   tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	if (tmu->model == SH_TMU_LEGACY)
		tmu->num_channels = 1;
	else
		tmu->num_channels = hweight8(cfg->channels_mask);

	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	if (tmu->model == SH_TMU_LEGACY) {
		ret = sh_tmu_channel_setup(&tmu->channels[0], 0,
					   cfg->clockevent_rating != 0,
					   cfg->clocksource_rating != 0, tmu);
		if (ret < 0)
			goto err_unmap;
	} else {
		/*
		 * Use the first channel as a clock event device and the second
		 * channel as a clock source.
		 */
		for (i = 0; i < tmu->num_channels; ++i) {
			ret = sh_tmu_channel_setup(&tmu->channels[i], i,
						   i == 0, i == 1, tmu);
			if (ret < 0)
				goto err_unmap;
		}
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	sh_tmu_unmap_memory(tmu);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}

static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh_tmu", SH_TMU_LEGACY },
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
	},
	.id_table	= sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");