clocksource: sh_cmt: Split static information from sh_cmt_device
drivers/clocksource/sh_cmt.c
/*
 * SuperH Timer Support - CMT
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

struct sh_cmt_device;

/*
 * The CMT comes in 5 different identified flavours, depending not only on the
 * SoC but also on the particular instance. The following table lists the main
 * characteristics of those flavours.
 *
 *			16B	32B	32B-F	48B	48B-2
 * -----------------------------------------------------------------------------
 * Channels		2	1/4	1	6	2/8
 * Control Width	16	16	16	16	32
 * Counter Width	16	32	32	32/48	32/48
 * Shared Start/Stop	Y	Y	Y	Y	N
 *
 * The 48-bit gen2 version has a per-channel start/stop register located in the
 * channel registers block. All other versions have a shared start/stop register
 * located in the global space.
 *
 * Note that CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
 * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
 */

enum sh_cmt_model {
	SH_CMT_16BIT,
	SH_CMT_32BIT,
	SH_CMT_32BIT_FAST,
	SH_CMT_48BIT,
	SH_CMT_48BIT_GEN2,
};

struct sh_cmt_info {
	enum sh_cmt_model model;

	unsigned long width; /* 16 or 32 bit version of hardware block */
	unsigned long overflow_bit;
	unsigned long clear_bits;

	/* callbacks for CMSTR and CMCSR access */
	unsigned long (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      unsigned long value);

	/* callbacks for CMCNT and CMCOR access */
	unsigned long (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs,
			    unsigned long value);
};

struct sh_cmt_channel {
	struct sh_cmt_device *cmt;
	unsigned int index;

	void __iomem *base;

	unsigned long flags;
	unsigned long match_value;
	unsigned long next_match_value;
	unsigned long max_match_value;
	unsigned long rate;
	raw_spinlock_t lock;
	struct clock_event_device ced;
	struct clocksource cs;
	unsigned long total_cycles;
	bool cs_enabled;
};

struct sh_cmt_device {
	struct platform_device *pdev;

	const struct sh_cmt_info *info;

	void __iomem *mapbase_ch;
	void __iomem *mapbase;
	struct clk *clk;

	struct sh_cmt_channel *channels;
	unsigned int num_channels;
};

static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
{
	return ioread16(base + (offs << 1));
}

static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
{
	return ioread32(base + (offs << 2));
}

static void sh_cmt_write16(void __iomem *base, unsigned long offs,
			   unsigned long value)
{
	iowrite16(value, base + (offs << 1));
}

static void sh_cmt_write32(void __iomem *base, unsigned long offs,
			   unsigned long value)
{
	iowrite32(value, base + (offs << 2));
}

static const struct sh_cmt_info sh_cmt_info[] = {
	[SH_CMT_16BIT] = {
		.model = SH_CMT_16BIT,
		.width = 16,
		.overflow_bit = 0x80,
		.clear_bits = ~0x80,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read16,
		.write_count = sh_cmt_write16,
	},
	[SH_CMT_32BIT] = {
		.model = SH_CMT_32BIT,
		.width = 32,
		.overflow_bit = 0x8000,
		.clear_bits = ~0xc000,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_32BIT_FAST] = {
		.model = SH_CMT_32BIT_FAST,
		.width = 32,
		.overflow_bit = 0x8000,
		.clear_bits = ~0xc000,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT] = {
		.model = SH_CMT_48BIT,
		.width = 32,
		.overflow_bit = 0x8000,
		.clear_bits = ~0xc000,
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT_GEN2] = {
		.model = SH_CMT_48BIT_GEN2,
		.width = 32,
		.overflow_bit = 0x8000,
		.clear_bits = ~0xc000,
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
};

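/*
 * Per-channel register indices. The width-specific accessors above turn an
 * index into a byte offset (index << 1 for 16-bit accesses, index << 2 for
 * 32-bit accesses). The shared CMSTR start/stop register is instead accessed
 * at offset 0 of the global mapbase.
 */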
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */

static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}

static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_control(ch->base, CMCSR);
}

static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_count(ch->base, CMCNT);
}

static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
}

static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	ch->cmt->info->write_control(ch->base, CMCSR, value);
}

static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	ch->cmt->info->write_count(ch->base, CMCNT, value);
}

static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,
				      unsigned long value)
{
	ch->cmt->info->write_count(ch->base, CMCOR, value);
}

static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
					int *has_wrapped)
{
	unsigned long v1, v2, v3;
	int o1, o2;

	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
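	/*
	 * Read CMCNT three times with the overflow flag sampled around the
	 * reads, and retry until the readings are mutually consistent, so
	 * that the middle value (v2) can be trusted even though the counter
	 * keeps running while it is being read.
	 */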
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(ch);
		v2 = sh_cmt_read_cmcnt(ch);
		v3 = sh_cmt_read_cmcnt(ch);
		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}

static DEFINE_RAW_SPINLOCK(sh_cmt_lock);

static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
	struct sh_timer_config *cfg = ch->cmt->pdev->dev.platform_data;
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_cmt_lock, flags);
	value = sh_cmt_read_cmstr(ch);

	if (start)
		value |= 1 << cfg->timer_bit;
	else
		value &= ~(1 << cfg->timer_bit);

	sh_cmt_write_cmstr(ch, value);
	raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}

static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate)
{
	int k, ret;

	pm_runtime_get_sync(&ch->cmt->pdev->dev);
	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->cmt->clk);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(ch, 0);

	/* configure channel, periodic mode and maximum timeout */
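	/*
	 * The CMCSR values below match the rates computed here: on the 16-bit
	 * variant 0x43 enables the compare match interrupt and selects the
	 * input clock divided by 512, while 0x01a4 sets up the wider variants
	 * for free-running compare match operation on the input clock divided
	 * by 8. The exact bit layout differs between variants; consult the
	 * datasheet for details.
	 */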
	if (ch->cmt->info->width == 16) {
		*rate = clk_get_rate(ch->cmt->clk) / 512;
		sh_cmt_write_cmcsr(ch, 0x43);
	} else {
		*rate = clk_get_rate(ch->cmt->clk) / 8;
		sh_cmt_write_cmcsr(ch, 0x01a4);
	}

	sh_cmt_write_cmcor(ch, 0xffffffff);
	sh_cmt_write_cmcnt(ch, 0);

	/*
	 * According to the sh73a0 user's manual, as CMCNT can be operated
	 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
	 * modifying CMCNT register; two RCLK cycles are necessary before
	 * this register is either read or any modification of the value
	 * it holds is reflected in the LSI's actual operation.
	 *
	 * While at it, we're supposed to clear out the CMCNT as of this
	 * moment, so make sure it's processed properly here. This will
	 * take RCLKx2 at maximum.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read_cmcnt(ch))
			break;
		udelay(1);
	}

	if (sh_cmt_read_cmcnt(ch)) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
			ch->index);
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(ch, 1);
	return 0;
 err1:
	/* stop clock */
	clk_disable(ch->cmt->clk);

 err0:
	return ret;
}

static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
	/* disable channel */
	sh_cmt_start_stop_ch(ch, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(ch, 0);

	/* stop clock */
	clk_disable(ch->cmt->clk);

	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
	pm_runtime_put(&ch->cmt->pdev->dev);
}

/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)
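/*
 * FLAG_CLOCKEVENT and FLAG_CLOCKSOURCE mark the channel as in use by the
 * respective framework. FLAG_REPROGRAM requests that the match value be
 * reprogrammed from the interrupt handler, FLAG_SKIPEVENT suppresses the
 * event_handler call for an interrupt that only serves reprogramming, and
 * FLAG_IRQCONTEXT tells set_next_event that it runs from the handler and
 * may simply update next_match_value.
 */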

static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
					      int absolute)
{
	unsigned long new_match;
	unsigned long value = ch->next_match_value;
	unsigned long delay = 0;
	unsigned long now = 0;
	int has_wrapped;

	now = sh_cmt_get_counter(ch, &has_wrapped);
	ch->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 * -> let the interrupt handler reprogram the timer.
		 * -> interrupt number two handles the event.
		 */
		ch->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > ch->max_match_value)
			new_match = ch->max_match_value;

		sh_cmt_write_cmcor(ch, new_match);

		now = sh_cmt_get_counter(ch, &has_wrapped);
		if (has_wrapped && (new_match > ch->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			ch->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* the counter has reached a value greater
		 * than our new match value. and since the
		 * has_wrapped flag isn't set we must have
		 * programmed a too close event.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		if (!delay)
			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
				 ch->index);

	} while (delay);
}

static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	if (delta > ch->max_match_value)
		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
			 ch->index);

	ch->next_match_value = delta;
	sh_cmt_clock_event_program_verify(ch, 0);
}

static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);
	__sh_cmt_set_next(ch, delta);
	raw_spin_unlock_irqrestore(&ch->lock, flags);
}

static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_channel *ch = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
			   ch->cmt->info->clear_bits);

	/* update clock source counter to begin with if enabled
	 * the wrap flag should be cleared by the timer specific
	 * isr before we end up here.
	 */
	if (ch->flags & FLAG_CLOCKSOURCE)
		ch->total_cycles += ch->match_value + 1;

	if (!(ch->flags & FLAG_REPROGRAM))
		ch->next_match_value = ch->max_match_value;

	ch->flags |= FLAG_IRQCONTEXT;

	if (ch->flags & FLAG_CLOCKEVENT) {
		if (!(ch->flags & FLAG_SKIPEVENT)) {
			if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
				ch->next_match_value = ch->max_match_value;
				ch->flags |= FLAG_REPROGRAM;
			}

			ch->ced.event_handler(&ch->ced);
		}
	}

	ch->flags &= ~FLAG_SKIPEVENT;

	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

		if (ch->flags & FLAG_CLOCKEVENT)
			if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
			    || (ch->match_value == ch->next_match_value))
				ch->flags &= ~FLAG_REPROGRAM;
	}

	ch->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}

static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);

	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		ret = sh_cmt_enable(ch, &ch->rate);

	if (ret)
		goto out;
	ch->flags |= flag;

	/* setup timeout if no clockevent */
	if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(ch, ch->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return ret;
}

static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&ch->lock, flags);

	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	ch->flags &= ~flag;

	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		sh_cmt_disable(ch);

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(ch, ch->max_match_value);

	raw_spin_unlock_irqrestore(&ch->lock, flags);
}

static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_channel, cs);
}

static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
	unsigned long flags, raw;
	unsigned long value;
	int has_wrapped;

	raw_spin_lock_irqsave(&ch->lock, flags);
	value = ch->total_cycles;
	raw = sh_cmt_get_counter(ch, &has_wrapped);

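	/*
	 * total_cycles accumulates full match periods in the interrupt
	 * handler; if the counter has wrapped again since the last interrupt,
	 * account for that period here before adding the raw counter value.
	 */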
	if (unlikely(has_wrapped))
		raw += ch->match_value + 1;
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return value + raw;
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	int ret;
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(ch->cs_enabled);

	ch->total_cycles = 0;

	ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}
	return ret;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(!ch->cs_enabled);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	ch->cs_enabled = false;
}

static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}

static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}

static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
				       const char *name, unsigned long rating)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = rating;
	cs->read = sh_cmt_clocksource_read;
	cs->enable = sh_cmt_clocksource_enable;
	cs->disable = sh_cmt_clocksource_disable;
	cs->suspend = sh_cmt_clocksource_suspend;
	cs->resume = sh_cmt_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_channel, ced);
}

static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_cmt_start(ch, FLAG_CLOCKEVENT);

	/* TODO: calculate good shift from rate and counter bit width */

	ced->shift = 32;
	ced->mult = div_sc(ch->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);

	if (periodic)
		sh_cmt_set_next(ch, ((ch->rate + HZ/2) / HZ) - 1);
	else
		sh_cmt_set_next(ch, ch->max_match_value);
}

static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->cmt->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_cmt_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->cmt->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_cmt_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);
		break;
	default:
		break;
	}
}

static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
	if (likely(ch->flags & FLAG_IRQCONTEXT))
		ch->next_match_value = delta - 1;
	else
		sh_cmt_set_next(ch, delta - 1);

	return 0;
}

static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
	clk_unprepare(ch->cmt->clk);
}

static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	clk_prepare(ch->cmt->clk);
	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
}

static void sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
				       const char *name, unsigned long rating)
{
	struct clock_event_device *ced = &ch->ced;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_mode = sh_cmt_clock_event_mode;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);
}

static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
			   unsigned long clockevent_rating,
			   unsigned long clocksource_rating)
{
	if (clockevent_rating)
		sh_cmt_register_clockevent(ch, name, clockevent_rating);

	if (clocksource_rating)
		sh_cmt_register_clocksource(ch, name, clocksource_rating);

	return 0;
}

static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
				struct sh_cmt_device *cmt)
{
	struct sh_timer_config *cfg = cmt->pdev->dev.platform_data;
	int irq;
	int ret;

	ch->cmt = cmt;
	ch->base = cmt->mapbase_ch;
	ch->index = index;

	irq = platform_get_irq(cmt->pdev, 0);
	if (irq < 0) {
		dev_err(&cmt->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return irq;
	}

	if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
		ch->max_match_value = ~0;
	else
		ch->max_match_value = (1 << cmt->info->width) - 1;

	ch->match_value = ch->max_match_value;
	raw_spin_lock_init(&ch->lock);

	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret) {
		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
			ch->index);
		return ret;
	}
	ch->cs_enabled = false;

	ret = request_irq(irq, sh_cmt_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&cmt->pdev->dev), ch);
	if (ret) {
		dev_err(&cmt->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, irq);
		return ret;
	}

	return 0;
}

static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res, *res2;
	int ret;
	ret = -ENXIO;

	cmt->pdev = pdev;

	if (!cfg) {
		dev_err(&cmt->pdev->dev, "missing platform data\n");
		goto err0;
	}

	res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	/* optional resource for the shared timer start/stop register */
	res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1);

	/* map memory, let mapbase_ch point to our channel */
	cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res));
	if (cmt->mapbase_ch == NULL) {
		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* map second resource for CMSTR */
	cmt->mapbase = ioremap_nocache(res2 ? res2->start :
				       res->start - cfg->channel_offset,
				       res2 ? resource_size(res2) : 2);
	if (cmt->mapbase == NULL) {
		dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n");
		goto err1;
	}

	/* get hold of clock */
	cmt->clk = clk_get(&cmt->pdev->dev, "cmt_fck");
	if (IS_ERR(cmt->clk)) {
		dev_err(&cmt->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(cmt->clk);
		goto err2;
	}

	ret = clk_prepare(cmt->clk);
	if (ret < 0)
		goto err3;

	/* identify the model based on the resources */
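	/*
	 * Heuristic: a 6-byte register window only holds three 16-bit
	 * registers (16-bit variant), a separate 4-byte start/stop resource
	 * indicates the second-generation 48-bit block, and everything else
	 * is handled as a plain 32-bit CMT.
	 */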
	if (resource_size(res) == 6)
		cmt->info = &sh_cmt_info[SH_CMT_16BIT];
	else if (res2 && (resource_size(res2) == 4))
		cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2];
	else
		cmt->info = &sh_cmt_info[SH_CMT_32BIT];

	cmt->channels = kzalloc(sizeof(*cmt->channels), GFP_KERNEL);
	if (cmt->channels == NULL) {
		ret = -ENOMEM;
		goto err4;
	}

	cmt->num_channels = 1;

	ret = sh_cmt_setup_channel(&cmt->channels[0], cfg->timer_bit, cmt);
	if (ret < 0)
		goto err4;

	platform_set_drvdata(pdev, cmt);

	return 0;
err4:
	kfree(cmt->channels);
	clk_unprepare(cmt->clk);
err3:
	clk_put(cmt->clk);
err2:
	iounmap(cmt->mapbase);
err1:
	iounmap(cmt->mapbase_ch);
err0:
	return ret;
}

static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (cmt) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
	if (cmt == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_cmt_setup(cmt, pdev);
	if (ret) {
		kfree(cmt);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= sh_cmt_remove,
	.driver		= {
		.name	= "sh_cmt",
	}
};

static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}

early_platform_init("earlytimer", &sh_cmt_device_driver);
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");