clocksource: sh_cmt: Allocate channels dynamically
drivers/clocksource/sh_cmt.c
/*
 * SuperH Timer Support - CMT
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

struct sh_cmt_device;

struct sh_cmt_channel {
	struct sh_cmt_device *cmt;
	unsigned int index;

	void __iomem *base;

	unsigned long flags;
	unsigned long match_value;
	unsigned long next_match_value;
	unsigned long max_match_value;
	unsigned long rate;
	raw_spinlock_t lock;
	struct clock_event_device ced;
	struct clocksource cs;
	unsigned long total_cycles;
	bool cs_enabled;
};

struct sh_cmt_device {
	struct platform_device *pdev;

	void __iomem *mapbase_ch;
	void __iomem *mapbase;
	struct clk *clk;

	struct sh_cmt_channel *channels;
	unsigned int num_channels;

	unsigned long width; /* 16 or 32 bit version of hardware block */
	unsigned long overflow_bit;
	unsigned long clear_bits;

	/* callbacks for CMSTR and CMCSR access */
	unsigned long (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      unsigned long value);

	/* callbacks for CMCNT and CMCOR access */
	unsigned long (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs,
			    unsigned long value);
};

/* Examples of supported CMT timer register layouts and I/O access widths:
 *
 * "16-bit counter and 16-bit control" as found on sh7263:
 * CMSTR 0xfffec000 16-bit
 * CMCSR 0xfffec002 16-bit
 * CMCNT 0xfffec004 16-bit
 * CMCOR 0xfffec006 16-bit
 *
 * "32-bit counter and 16-bit control" as found on sh7372, sh73a0, r8a7740:
 * CMSTR 0xffca0000 16-bit
 * CMCSR 0xffca0060 16-bit
 * CMCNT 0xffca0064 32-bit
 * CMCOR 0xffca0068 32-bit
 *
 * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790:
 * CMSTR 0xffca0500 32-bit
 * CMCSR 0xffca0510 32-bit
 * CMCNT 0xffca0514 32-bit
 * CMCOR 0xffca0518 32-bit
 */

static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
{
	return ioread16(base + (offs << 1));
}

static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
{
	return ioread32(base + (offs << 2));
}

static void sh_cmt_write16(void __iomem *base, unsigned long offs,
			   unsigned long value)
{
	iowrite16(value, base + (offs << 1));
}

static void sh_cmt_write32(void __iomem *base, unsigned long offs,
			   unsigned long value)
{
	iowrite32(value, base + (offs << 2));
}

#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */

static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
	return ch->cmt->read_control(ch->cmt->mapbase, 0);
}

static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
	return ch->cmt->read_control(ch->base, CMCSR);
}

static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
	return ch->cmt->read_count(ch->base, CMCNT);
}

static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	ch->cmt->write_control(ch->cmt->mapbase, 0, value);
}

static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	ch->cmt->write_control(ch->base, CMCSR, value);
}

static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	ch->cmt->write_count(ch->base, CMCNT, value);
}

static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	ch->cmt->write_count(ch->base, CMCOR, value);
}

static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
					int *has_wrapped)
{
	unsigned long v1, v2, v3;
	int o1, o2;

	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(ch);
		v2 = sh_cmt_read_cmcnt(ch);
		v3 = sh_cmt_read_cmcnt(ch);
		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}

static DEFINE_RAW_SPINLOCK(sh_cmt_lock);

static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
	struct sh_timer_config *cfg = ch->cmt->pdev->dev.platform_data;
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_cmt_lock, flags);
	value = sh_cmt_read_cmstr(ch);

	if (start)
		value |= 1 << cfg->timer_bit;
	else
		value &= ~(1 << cfg->timer_bit);

	sh_cmt_write_cmstr(ch, value);
	raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}

static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)
{
	int k, ret;

	pm_runtime_get_sync(&ch->cmt->pdev->dev);
	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->cmt->clk);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(ch, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (ch->cmt->width == 16) {
		*rate = clk_get_rate(ch->cmt->clk) / 512;
		sh_cmt_write_cmcsr(ch, 0x43);
	} else {
		*rate = clk_get_rate(ch->cmt->clk) / 8;
		sh_cmt_write_cmcsr(ch, 0x01a4);
	}

	sh_cmt_write_cmcor(ch, 0xffffffff);
	sh_cmt_write_cmcnt(ch, 0);

	/*
	 * According to the sh73a0 user's manual, as CMCNT can be operated
	 * only by the RCLK (Pseudo 32 kHz), there's one restriction on
	 * modifying CMCNT register; two RCLK cycles are necessary before
	 * this register is either read or any modification of the value
	 * it holds is reflected in the LSI's actual operation.
	 *
	 * While at it, we're supposed to clear out the CMCNT as of this
	 * moment, so make sure it's processed properly here. This will
	 * take RCLKx2 at maximum.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read_cmcnt(ch))
			break;
		udelay(1);
	}

	if (sh_cmt_read_cmcnt(ch)) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
			ch->index);
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(ch, 1);
	return 0;
err1:
	/* stop clock */
	clk_disable(ch->cmt->clk);

err0:
	return ret;
}

static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
	/* disable channel */
	sh_cmt_start_stop_ch(ch, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(ch, 0);

	/* stop clock */
	clk_disable(ch->cmt->clk);

	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
	pm_runtime_put(&ch->cmt->pdev->dev);
}

/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)

static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
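/*
 * The raw I/O helpers below scale the register index by the access width:
 * 16-bit blocks space registers 2 bytes apart (offs << 1) and 32-bit
 * blocks 4 bytes apart (offs << 2), matching the layouts listed above.
 */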
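/*
 * CMSTR is shared by all channels and is accessed through the device-wide
 * mapbase at offset 0; the per-channel CMCSR/CMCNT/CMCOR registers are
 * indexed from the channel's own base mapping.
 */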
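/*
 * Read the counter together with the overflow flag: CMCNT is sampled
 * three times with the flag checked before and after, and the read is
 * retried until the flag is stable and the samples are monotonic, so
 * the returned value and *has_wrapped are consistent with each other.
 */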
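	/*
	 * The CMCSR values written below also select the input clock
	 * divider that matches the rate computed here: /512 on 16-bit
	 * blocks and /8 on 32-bit blocks.
	 */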
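/*
 * FLAG_CLOCKEVENT and FLAG_CLOCKSOURCE track which frameworks currently
 * use the channel; FLAG_REPROGRAM, FLAG_SKIPEVENT and FLAG_IRQCONTEXT
 * coordinate match-value reprogramming between sh_cmt_interrupt() and
 * sh_cmt_clock_event_program_verify().
 */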
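/*
 * Program a new match value and verify it against the live counter:
 * retry with an exponentially growing safety delay until the counter is
 * confirmed to be below the new match value, or a detected wrap hands
 * event handling over to the interrupt handler via FLAG_SKIPEVENT.
 */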
					      int absolute)
{
	unsigned long new_match;
	unsigned long value = ch->next_match_value;
	unsigned long delay = 0;
	unsigned long now = 0;
	int has_wrapped;

	now = sh_cmt_get_counter(ch, &has_wrapped);
	ch->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 * -> let the interrupt handler reprogram the timer.
		 * -> interrupt number two handles the event.
		 */
		ch->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > ch->max_match_value)
			new_match = ch->max_match_value;

		sh_cmt_write_cmcor(ch, new_match);

		now = sh_cmt_get_counter(ch, &has_wrapped);
		if (has_wrapped && (new_match > ch->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			ch->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		if (!delay)
			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
				 ch->index);

	} while (delay);
}

static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	if (delta > ch->max_match_value)
		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
			 ch->index);

	ch->next_match_value = delta;
	sh_cmt_clock_event_program_verify(ch, 0);
}

static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);
	__sh_cmt_set_next(ch, delta);
	raw_spin_unlock_irqrestore(&ch->lock, flags);
}

static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_channel *ch = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) & ch->cmt->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (ch->flags & FLAG_CLOCKSOURCE)
		ch->total_cycles += ch->match_value + 1;

	if (!(ch->flags & FLAG_REPROGRAM))
		ch->next_match_value = ch->max_match_value;

	ch->flags |= FLAG_IRQCONTEXT;

	if (ch->flags & FLAG_CLOCKEVENT) {
		if (!(ch->flags & FLAG_SKIPEVENT)) {
			if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
				ch->next_match_value = ch->max_match_value;
				ch->flags |= FLAG_REPROGRAM;
			}

			ch->ced.event_handler(&ch->ced);
		}
	}

	ch->flags &= ~FLAG_SKIPEVENT;

	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

		if (ch->flags & FLAG_CLOCKEVENT)
			if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
			    || (ch->match_value == ch->next_match_value))
				ch->flags &= ~FLAG_REPROGRAM;
	}

	ch->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}

static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);

	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		ret = sh_cmt_enable(ch, &ch->rate);

	if (ret)
		goto out;
	ch->flags |= flag;

	/* setup timeout if no clockevent */
	if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(ch, ch->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return ret;
}

static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&ch->lock, flags);

	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	ch->flags &= ~flag;

	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		sh_cmt_disable(ch);

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(ch, ch->max_match_value);

	raw_spin_unlock_irqrestore(&ch->lock, flags);
}

static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_channel, cs);
}

static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
	unsigned long flags, raw;
	unsigned long value;
	int has_wrapped;

	raw_spin_lock_irqsave(&ch->lock, flags);
	value = ch->total_cycles;
	raw = sh_cmt_get_counter(ch, &has_wrapped);

	if (unlikely(has_wrapped))
		raw += ch->match_value + 1;
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return value + raw;
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	int ret;
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(ch->cs_enabled);

	ch->total_cycles = 0;

	ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}
	return ret;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(!ch->cs_enabled);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	ch->cs_enabled = false;
}

static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}

static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}

static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
				       const char *name, unsigned long rating)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = rating;
	cs->read = sh_cmt_clocksource_read;
	cs->enable = sh_cmt_clocksource_enable;
	cs->disable = sh_cmt_clocksource_disable;
	cs->suspend = sh_cmt_clocksource_suspend;
	cs->resume = sh_cmt_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
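/*
 * The clocksource value is composed of total_cycles, which the interrupt
 * handler advances by one full period per match, plus the current raw
 * counter (corrected by one period if a wrap is pending).
 */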
{
	return container_of(ced, struct sh_cmt_channel, ced);
}

static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_cmt_start(ch, FLAG_CLOCKEVENT);

	/* TODO: calculate good shift from rate and counter bit width */

	ced->shift = 32;
	ced->mult = div_sc(ch->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);

	if (periodic)
		sh_cmt_set_next(ch, ((ch->rate + HZ/2) / HZ) - 1);
	else
		sh_cmt_set_next(ch, ch->max_match_value);
}

static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->cmt->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_cmt_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->cmt->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_cmt_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}
}

static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
	if (likely(ch->flags & FLAG_IRQCONTEXT))
		ch->next_match_value = delta - 1;
	else
		sh_cmt_set_next(ch, delta - 1);

	return 0;
}

static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
	clk_unprepare(ch->cmt->clk);
}

static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	clk_prepare(ch->cmt->clk);
	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
}

static void sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
				       const char *name, unsigned long rating)
{
	struct clock_event_device *ced = &ch->ced;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_mode = sh_cmt_clock_event_mode;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);
}

static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
			   unsigned long clockevent_rating,
			   unsigned long clocksource_rating)
{
	if (clockevent_rating)
		sh_cmt_register_clockevent(ch, name, clockevent_rating);

	if (clocksource_rating)
		sh_cmt_register_clocksource(ch, name, clocksource_rating);

	return 0;
}

static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
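/*
 * Standard clockevents mult/shift setup; periodic mode programs one tick
 * worth of cycles minus one, since the compare-match fires when CMCNT
 * reaches the programmed value.
 */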
				struct sh_cmt_device *cmt)
{
	struct sh_timer_config *cfg = cmt->pdev->dev.platform_data;
	int irq;
	int ret;

	ch->cmt = cmt;
	ch->base = cmt->mapbase_ch;
	ch->index = index;

	irq = platform_get_irq(cmt->pdev, 0);
	if (irq < 0) {
		dev_err(&cmt->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return irq;
	}

	if (cmt->width == (sizeof(ch->max_match_value) * 8))
		ch->max_match_value = ~0;
	else
		ch->max_match_value = (1 << cmt->width) - 1;

	ch->match_value = ch->max_match_value;
	raw_spin_lock_init(&ch->lock);

	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret) {
		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
			ch->index);
		return ret;
	}
	ch->cs_enabled = false;

	ret = request_irq(irq, sh_cmt_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&cmt->pdev->dev), ch);
	if (ret) {
		dev_err(&cmt->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, irq);
		return ret;
	}

	return 0;
}

static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res, *res2;
	int ret = -ENXIO;

	cmt->pdev = pdev;

	if (!cfg) {
		dev_err(&cmt->pdev->dev, "missing platform data\n");
		goto err0;
	}

	res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	/* optional resource for the shared timer start/stop register */
	res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1);

	/* map memory, let mapbase_ch point to our channel */
	cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res));
	if (cmt->mapbase_ch == NULL) {
		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* map second resource for CMSTR */
	cmt->mapbase = ioremap_nocache(res2 ? res2->start :
				       res->start - cfg->channel_offset,
				       res2 ? resource_size(res2) : 2);
	if (cmt->mapbase == NULL) {
		dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n");
		goto err1;
	}

	/* get hold of clock */
	cmt->clk = clk_get(&cmt->pdev->dev, "cmt_fck");
	if (IS_ERR(cmt->clk)) {
		dev_err(&cmt->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(cmt->clk);
		goto err2;
	}

	ret = clk_prepare(cmt->clk);
	if (ret < 0)
		goto err3;
	if (res2 && (resource_size(res2) == 4)) {
		/* assume both CMSTR and CMCSR to be 32-bit */
		cmt->read_control = sh_cmt_read32;
		cmt->write_control = sh_cmt_write32;
	} else {
		cmt->read_control = sh_cmt_read16;
		cmt->write_control = sh_cmt_write16;
	}

	if (resource_size(res) == 6) {
		cmt->width = 16;
		cmt->read_count = sh_cmt_read16;
		cmt->write_count = sh_cmt_write16;
		cmt->overflow_bit = 0x80;
		cmt->clear_bits = ~0x80;
	} else {
		cmt->width = 32;
		cmt->read_count = sh_cmt_read32;
		cmt->write_count = sh_cmt_write32;
		cmt->overflow_bit = 0x8000;
		cmt->clear_bits = ~0xc000;
	}

	cmt->channels = kzalloc(sizeof(*cmt->channels), GFP_KERNEL);
	if (cmt->channels == NULL) {
		ret = -ENOMEM;
		goto err4;
	}

	cmt->num_channels = 1;

	ret = sh_cmt_setup_channel(&cmt->channels[0], cfg->timer_bit, cmt);
	if (ret < 0)
		goto err4;

	platform_set_drvdata(pdev, cmt);

	return 0;
err4:
	kfree(cmt->channels);
	clk_unprepare(cmt->clk);
err3:
	clk_put(cmt->clk);
err2:
	iounmap(cmt->mapbase);
err1:
	iounmap(cmt->mapbase_ch);
err0:
	return ret;
}

static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (cmt) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
	if (cmt == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_cmt_setup(cmt, pdev);
	if (ret) {
		kfree(cmt);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= sh_cmt_remove,
	.driver		= {
		.name	= "sh_cmt",
	}
};

static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}

early_platform_init("earlytimer", &sh_cmt_device_driver);
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");
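/*
 * The driver registers twice: as an "earlytimer" early platform driver,
 * so the timer can be used before regular device probing, and as an
 * ordinary platform driver; probe tells the two cases apart with
 * is_early_platform_device().
 */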