clocksource: sh_tmu: Replace hardcoded register values with macros
drivers/clocksource/sh_tmu.c
/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

struct sh_tmu_device;

struct sh_tmu_channel {
        struct sh_tmu_device *tmu;
        unsigned int index;

        void __iomem *base;
        int irq;

        unsigned long rate;
        unsigned long periodic;
        struct clock_event_device ced;
        struct clocksource cs;
        bool cs_enabled;
        unsigned int enable_count;
};

struct sh_tmu_device {
        struct platform_device *pdev;

        void __iomem *mapbase;
        struct clk *clk;

        struct sh_tmu_channel *channels;
        unsigned int num_channels;
};

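/* protects the start/stop register (TSTR) shared by all timer channels */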
static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

#define TCR_UNF (1 << 8)
#define TCR_UNIE (1 << 5)
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)
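
/*
 * Example (pure arithmetic from the masks above): TCR_UNIE | TCR_TPSC_CLK4
 * evaluates to 0x0020, i.e. underflow interrupt enabled with the counter
 * clocked at the parent clock divided by 4.
 */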

static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
        unsigned long offs;

        if (reg_nr == TSTR)
                return ioread8(ch->tmu->mapbase);

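        /* channel registers are 32 bits apart: TCOR 0x0, TCNT 0x4, TCR 0x8 */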
        offs = reg_nr << 2;

        if (reg_nr == TCR)
                return ioread16(ch->base + offs);
        else
                return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
                                unsigned long value)
{
        unsigned long offs;

        if (reg_nr == TSTR) {
                iowrite8(value, ch->tmu->mapbase);
                return;
        }

        offs = reg_nr << 2;

        if (reg_nr == TCR)
                iowrite16(value, ch->base + offs);
        else
                iowrite32(value, ch->base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
        unsigned long flags, value;

        /* start stop register shared by multiple timer channels */
        raw_spin_lock_irqsave(&sh_tmu_lock, flags);
        value = sh_tmu_read(ch, TSTR);

        if (start)
                value |= 1 << ch->index;
        else
                value &= ~(1 << ch->index);

        sh_tmu_write(ch, TSTR, value);
        raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
        int ret;

        /* enable clock */
        ret = clk_enable(ch->tmu->clk);
        if (ret) {
                dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
                        ch->index);
                return ret;
        }

        /* make sure channel is disabled */
        sh_tmu_start_stop_ch(ch, 0);

        /* maximum timeout */
        sh_tmu_write(ch, TCOR, 0xffffffff);
        sh_tmu_write(ch, TCNT, 0xffffffff);

        /* configure channel to parent clock / 4, irq off */
        ch->rate = clk_get_rate(ch->tmu->clk) / 4;
        sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

        /* enable channel */
        sh_tmu_start_stop_ch(ch, 1);

        return 0;
}

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
        if (ch->enable_count++ > 0)
                return 0;

        pm_runtime_get_sync(&ch->tmu->pdev->dev);
        dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

        return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
        /* disable channel */
        sh_tmu_start_stop_ch(ch, 0);

        /* disable interrupts in TMU block */
        sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

        /* stop clock */
        clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
        if (WARN_ON(ch->enable_count == 0))
                return;

        if (--ch->enable_count > 0)
                return;

        __sh_tmu_disable(ch);

        dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
        pm_runtime_put(&ch->tmu->pdev->dev);
}

static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
                            int periodic)
{
        /* stop timer */
        sh_tmu_start_stop_ch(ch, 0);

        /* acknowledge a pending underflow: read TCR, then clear TCR_UNF
         * via the write below
         */
        sh_tmu_read(ch, TCR);

        /* enable interrupt */
        sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

        /* reload delta value in case of periodic timer */
        if (periodic)
                sh_tmu_write(ch, TCOR, delta);
        else
                sh_tmu_write(ch, TCOR, 0xffffffff);

        sh_tmu_write(ch, TCNT, delta);

        /* start timer */
        sh_tmu_start_stop_ch(ch, 1);
}

static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
        struct sh_tmu_channel *ch = dev_id;

        /* oneshot: disable the interrupt (drop TCR_UNIE); periodic:
         * acknowledge it and keep it enabled
         */
        if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
                sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
        else
                sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

        /* notify clockevent layer */
        ch->ced.event_handler(&ch->ced);
        return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
        return container_of(cs, struct sh_tmu_channel, cs);
}

static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

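        /* TCNT counts down from 0xffffffff; invert so the clocksource counts up */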
        return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
        int ret;

        if (WARN_ON(ch->cs_enabled))
                return 0;

        ret = sh_tmu_enable(ch);
        if (!ret) {
                __clocksource_updatefreq_hz(cs, ch->rate);
                ch->cs_enabled = true;
        }

        return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        if (WARN_ON(!ch->cs_enabled))
                return;

        sh_tmu_disable(ch);
        ch->cs_enabled = false;
}

static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        if (!ch->cs_enabled)
                return;

        if (--ch->enable_count == 0) {
                __sh_tmu_disable(ch);
                pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
        }
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
        struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

        if (!ch->cs_enabled)
                return;

        if (ch->enable_count++ == 0) {
                pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
                __sh_tmu_enable(ch);
        }
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
                                       const char *name, unsigned long rating)
{
        struct clocksource *cs = &ch->cs;

        cs->name = name;
        cs->rating = rating;
        cs->read = sh_tmu_clocksource_read;
        cs->enable = sh_tmu_clocksource_enable;
        cs->disable = sh_tmu_clocksource_disable;
        cs->suspend = sh_tmu_clocksource_suspend;
        cs->resume = sh_tmu_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(32);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

        dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
                 ch->index);

        /* Register with dummy 1 Hz value, gets updated in ->enable() */
        clocksource_register_hz(cs, 1);
        return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
        return container_of(ced, struct sh_tmu_channel, ced);
}

static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
        struct clock_event_device *ced = &ch->ced;

        sh_tmu_enable(ch);

        clockevents_config(ced, ch->rate);

        if (periodic) {
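                /*
                 * Ticks per jiffy, rounded to the nearest integer: e.g. with
                 * an (assumed) 8.25 MHz timer rate and HZ=100 this yields
                 * 82500 ticks per period.
                 */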
                ch->periodic = (ch->rate + HZ/2) / HZ;
                sh_tmu_set_next(ch, ch->periodic, 1);
        }
}

static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
                                    struct clock_event_device *ced)
{
        struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
        int disabled = 0;

        /* deal with old setting first */
        switch (ced->mode) {
        case CLOCK_EVT_MODE_PERIODIC:
        case CLOCK_EVT_MODE_ONESHOT:
                sh_tmu_disable(ch);
                disabled = 1;
                break;
        default:
                break;
        }

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                dev_info(&ch->tmu->pdev->dev,
                         "ch%u: used for periodic clock events\n", ch->index);
                sh_tmu_clock_event_start(ch, 1);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                dev_info(&ch->tmu->pdev->dev,
                         "ch%u: used for oneshot clock events\n", ch->index);
                sh_tmu_clock_event_start(ch, 0);
                break;
        case CLOCK_EVT_MODE_UNUSED:
                if (!disabled)
                        sh_tmu_disable(ch);
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
        default:
                break;
        }
}

static int sh_tmu_clock_event_next(unsigned long delta,
                                   struct clock_event_device *ced)
{
        struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

        BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

        /* program new delta value */
        sh_tmu_set_next(ch, delta, 0);
        return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
        pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
        pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
                                       const char *name, unsigned long rating)
{
        struct clock_event_device *ced = &ch->ced;
        int ret;

        ced->name = name;
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = rating;
        ced->cpumask = cpumask_of(0);
        ced->set_next_event = sh_tmu_clock_event_next;
        ced->set_mode = sh_tmu_clock_event_mode;
        ced->suspend = sh_tmu_clock_event_suspend;
        ced->resume = sh_tmu_clock_event_resume;

        dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
                 ch->index);

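        /*
         * Register with a dummy 1 Hz rate; sh_tmu_clock_event_start()
         * installs the real rate through clockevents_config().
         */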
        clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

        ret = request_irq(ch->irq, sh_tmu_interrupt,
                          IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
                          dev_name(&ch->tmu->pdev->dev), ch);
        if (ret) {
                dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
                        ch->index, ch->irq);
                return;
        }
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
                           unsigned long clockevent_rating,
                           unsigned long clocksource_rating)
{
        if (clockevent_rating)
                sh_tmu_register_clockevent(ch, name, clockevent_rating);
        else if (clocksource_rating)
                sh_tmu_register_clocksource(ch, name, clocksource_rating);

        return 0;
}

static int sh_tmu_channel_setup(struct sh_tmu_channel *ch,
                                struct sh_tmu_device *tmu)
{
        struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

        ch->tmu = tmu;

        /*
         * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps channel
         * register blocks at base + 2 + 12 * index, while all other variants
         * map them at base + 4 + 12 * index. We can compute the index by just
         * dividing by 12, the 2 bytes or 4 bytes offset being hidden by the
         * integer division.
         */
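        /* e.g. on SH3, channel 2 sits at offset 2 + 12 * 2 = 26, and 26 / 12 = 2 */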
        ch->index = cfg->channel_offset / 12;

        ch->irq = platform_get_irq(tmu->pdev, 0);
        if (ch->irq < 0) {
                dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
                        ch->index);
                return ch->irq;
        }

        ch->cs_enabled = false;
        ch->enable_count = 0;

        return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
                               cfg->clockevent_rating,
                               cfg->clocksource_rating);
}

static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
        struct sh_timer_config *cfg = pdev->dev.platform_data;
        struct resource *res;
        void __iomem *base;
        int ret = -ENXIO;

        tmu->pdev = pdev;

        if (!cfg) {
                dev_err(&tmu->pdev->dev, "missing platform data\n");
                goto err0;
        }

        platform_set_drvdata(pdev, tmu);

        res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
                goto err0;
        }

        /*
         * Map memory, let base point to our channel and mapbase to the
         * start/stop shared register.
         */
        base = ioremap_nocache(res->start, resource_size(res));
        if (base == NULL) {
                dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
                goto err0;
        }

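        /*
         * e.g. channel 0 on non-SH3 parts has channel_offset = 4, putting
         * mapbase 4 bytes below the channel registers, right at TSTR.
         */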
        tmu->mapbase = base - cfg->channel_offset;

        /* get hold of clock */
        tmu->clk = clk_get(&tmu->pdev->dev, "tmu_fck");
        if (IS_ERR(tmu->clk)) {
                dev_err(&tmu->pdev->dev, "cannot get clock\n");
                ret = PTR_ERR(tmu->clk);
                goto err1;
        }

        ret = clk_prepare(tmu->clk);
        if (ret < 0)
                goto err2;

        tmu->channels = kzalloc(sizeof(*tmu->channels), GFP_KERNEL);
        if (tmu->channels == NULL) {
                ret = -ENOMEM;
                goto err3;
        }

        tmu->num_channels = 1;

        tmu->channels[0].base = base;

        ret = sh_tmu_channel_setup(&tmu->channels[0], tmu);
        if (ret < 0)
                goto err3;

        return 0;

err3:
        kfree(tmu->channels);
        clk_unprepare(tmu->clk);
err2:
        clk_put(tmu->clk);
err1:
        iounmap(base);
err0:
        return ret;
}

static int sh_tmu_probe(struct platform_device *pdev)
{
        struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
        struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;

        if (!is_early_platform_device(pdev)) {
                pm_runtime_set_active(&pdev->dev);
                pm_runtime_enable(&pdev->dev);
        }

        if (tmu) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
                goto out;
        }

        tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
        if (tmu == NULL) {
                dev_err(&pdev->dev, "failed to allocate driver data\n");
                return -ENOMEM;
        }

        ret = sh_tmu_setup(tmu, pdev);
        if (ret) {
                kfree(tmu);
                pm_runtime_idle(&pdev->dev);
                return ret;
        }

        if (is_early_platform_device(pdev))
                return 0;

out:
        if (cfg->clockevent_rating || cfg->clocksource_rating)
                pm_runtime_irq_safe(&pdev->dev);
        else
                pm_runtime_idle(&pdev->dev);

        return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
        return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_tmu_device_driver = {
        .probe = sh_tmu_probe,
        .remove = sh_tmu_remove,
        .driver = {
                .name = "sh_tmu",
        }
};

static int __init sh_tmu_init(void)
{
        return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
        platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");