clocksource: sh_mtu2: Drop support for legacy platform data
[deliverable/linux.git] / drivers / clocksource / sh_mtu2.c
CommitLineData
d5ed4c2e
MD
1/*
2 * SuperH Timer Support - MTU2
3 *
4 * Copyright (C) 2009 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
d5ed4c2e
MD
14 */
15
346f5e76
LP
16#include <linux/clk.h>
17#include <linux/clockchips.h>
18#include <linux/delay.h>
19#include <linux/err.h>
d5ed4c2e 20#include <linux/init.h>
d5ed4c2e 21#include <linux/interrupt.h>
d5ed4c2e 22#include <linux/io.h>
346f5e76 23#include <linux/ioport.h>
d5ed4c2e 24#include <linux/irq.h>
7deeab5d 25#include <linux/module.h>
346f5e76 26#include <linux/platform_device.h>
57d13370 27#include <linux/pm_domain.h>
3cb6f10a 28#include <linux/pm_runtime.h>
346f5e76
LP
29#include <linux/sh_timer.h>
30#include <linux/slab.h>
31#include <linux/spinlock.h>
d5ed4c2e 32
struct sh_mtu2_device;

/*
 * Per-channel state: one MTU2 hardware timer channel, exported to the
 * kernel as a periodic clock event device.
 */
struct sh_mtu2_channel {
	struct sh_mtu2_device *mtu;	/* parent MTU2 device */
	unsigned int index;		/* channel number (0-2), also the TSTR bit */

	void __iomem *base;		/* this channel's register window */

	struct clock_event_device ced;	/* clockevent registered for this channel */
};

/*
 * Per-device state: one MTU2 block containing up to three usable channels.
 */
struct sh_mtu2_device {
	struct platform_device *pdev;

	void __iomem *mapbase;		/* whole MTU2 register block (ioremapped) */
	struct clk *clk;		/* functional clock ("fck") */

	struct sh_mtu2_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;		/* true once any channel registered a ced */
};
55
/* Protects the TSTR start/stop register, shared by all channels. */
static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);

/* Logical register indices; translated to byte offsets by mtu2_reg_offs[]. */
#define TSTR -1 /* shared register */
#define TCR 0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR 4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR 6 /* channel register */

/* TCR: counter clear source, clock edge, and timer prescaler selection. */
#define TCR_CCLR_NONE (0 << 5)
#define TCR_CCLR_TGRA (1 << 5)
#define TCR_CCLR_TGRB (2 << 5)
#define TCR_CCLR_SYNC (3 << 5)
#define TCR_CCLR_TGRC (5 << 5)
#define TCR_CCLR_TGRD (6 << 5)
#define TCR_CCLR_MASK (7 << 5)
#define TCR_CKEG_RISING (0 << 3)
#define TCR_CKEG_FALLING (1 << 3)
#define TCR_CKEG_BOTH (2 << 3)
#define TCR_CKEG_MASK (3 << 3)
/* Values 4 to 7 are channel-dependent */
#define TCR_TPSC_P1 (0 << 0)
#define TCR_TPSC_P4 (1 << 0)
#define TCR_TPSC_P16 (2 << 0)
#define TCR_TPSC_P64 (3 << 0)
#define TCR_TPSC_CH0_TCLKA (4 << 0)
#define TCR_TPSC_CH0_TCLKB (5 << 0)
#define TCR_TPSC_CH0_TCLKC (6 << 0)
#define TCR_TPSC_CH0_TCLKD (7 << 0)
#define TCR_TPSC_CH1_TCLKA (4 << 0)
#define TCR_TPSC_CH1_TCLKB (5 << 0)
#define TCR_TPSC_CH1_P256 (6 << 0)
#define TCR_TPSC_CH1_TCNT2 (7 << 0)
#define TCR_TPSC_CH2_TCLKA (4 << 0)
#define TCR_TPSC_CH2_TCLKB (5 << 0)
#define TCR_TPSC_CH2_TCLKC (6 << 0)
#define TCR_TPSC_CH2_P1024 (7 << 0)
#define TCR_TPSC_CH34_P256 (4 << 0)
#define TCR_TPSC_CH34_P1024 (5 << 0)
#define TCR_TPSC_CH34_TCLKA (6 << 0)
#define TCR_TPSC_CH34_TCLKB (7 << 0)
#define TCR_TPSC_MASK (7 << 0)

/* TMDR: buffer operation and timer operating mode. */
#define TMDR_BFE (1 << 6)
#define TMDR_BFB (1 << 5)
#define TMDR_BFA (1 << 4)
#define TMDR_MD_NORMAL (0 << 0)
#define TMDR_MD_PWM_1 (2 << 0)
#define TMDR_MD_PWM_2 (3 << 0)
#define TMDR_MD_PHASE_1 (4 << 0)
#define TMDR_MD_PHASE_2 (5 << 0)
#define TMDR_MD_PHASE_3 (6 << 0)
#define TMDR_MD_PHASE_4 (7 << 0)
#define TMDR_MD_PWM_SYNC (8 << 0)
#define TMDR_MD_PWM_COMP_CREST (13 << 0)
#define TMDR_MD_PWM_COMP_TROUGH (14 << 0)
#define TMDR_MD_PWM_COMP_BOTH (15 << 0)
#define TMDR_MD_MASK (15 << 0)

/* TIOR: I/O control for the high (IOCH) and low (IOCL) nibbles. */
#define TIOC_IOCH(n) ((n) << 4)
#define TIOC_IOCL(n) ((n) << 0)
#define TIOR_OC_RETAIN (0 << 0)
#define TIOR_OC_0_CLEAR (1 << 0)
#define TIOR_OC_0_SET (2 << 0)
#define TIOR_OC_0_TOGGLE (3 << 0)
#define TIOR_OC_1_CLEAR (5 << 0)
#define TIOR_OC_1_SET (6 << 0)
#define TIOR_OC_1_TOGGLE (7 << 0)
#define TIOR_IC_RISING (8 << 0)
#define TIOR_IC_FALLING (9 << 0)
#define TIOR_IC_BOTH (10 << 0)
#define TIOR_IC_TCNT (12 << 0)
#define TIOR_MASK (15 << 0)

/* TIER: interrupt enable bits. */
#define TIER_TTGE (1 << 7)
#define TIER_TTGE2 (1 << 6)
#define TIER_TCIEU (1 << 5)
#define TIER_TCIEV (1 << 4)
#define TIER_TGIED (1 << 3)
#define TIER_TGIEC (1 << 2)
#define TIER_TGIEB (1 << 1)
#define TIER_TGIEA (1 << 0)

/* TSR: interrupt status flags (write 0 to clear). */
#define TSR_TCFD (1 << 7)
#define TSR_TCFU (1 << 5)
#define TSR_TCFV (1 << 4)
#define TSR_TGFD (1 << 3)
#define TSR_TGFC (1 << 2)
#define TSR_TGFB (1 << 1)
#define TSR_TGFA (1 << 0)

/* Byte offsets of the per-channel registers relative to the channel base. */
static unsigned long mtu2_reg_offs[] = {
	[TCR] = 0,
	[TMDR] = 1,
	[TIOR] = 2,
	[TIER] = 4,
	[TSR] = 5,
	[TCNT] = 6,
	[TGR] = 8,
};
158
42752cc6 159static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
d5ed4c2e 160{
d5ed4c2e
MD
161 unsigned long offs;
162
1a5da0e4
LP
163 if (reg_nr == TSTR)
164 return ioread8(ch->mtu->mapbase + 0x280);
d5ed4c2e
MD
165
166 offs = mtu2_reg_offs[reg_nr];
167
168 if ((reg_nr == TCNT) || (reg_nr == TGR))
da90a1c6 169 return ioread16(ch->base + offs);
d5ed4c2e 170 else
da90a1c6 171 return ioread8(ch->base + offs);
d5ed4c2e
MD
172}
173
42752cc6 174static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
d5ed4c2e
MD
175 unsigned long value)
176{
d5ed4c2e
MD
177 unsigned long offs;
178
1a5da0e4
LP
179 if (reg_nr == TSTR)
180 return iowrite8(value, ch->mtu->mapbase + 0x280);
d5ed4c2e
MD
181
182 offs = mtu2_reg_offs[reg_nr];
183
184 if ((reg_nr == TCNT) || (reg_nr == TGR))
da90a1c6 185 iowrite16(value, ch->base + offs);
d5ed4c2e 186 else
da90a1c6 187 iowrite8(value, ch->base + offs);
d5ed4c2e
MD
188}
189
42752cc6 190static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
d5ed4c2e 191{
d5ed4c2e
MD
192 unsigned long flags, value;
193
194 /* start stop register shared by multiple timer channels */
50393a92 195 raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
42752cc6 196 value = sh_mtu2_read(ch, TSTR);
d5ed4c2e
MD
197
198 if (start)
d2b93177 199 value |= 1 << ch->index;
d5ed4c2e 200 else
d2b93177 201 value &= ~(1 << ch->index);
d5ed4c2e 202
42752cc6 203 sh_mtu2_write(ch, TSTR, value);
50393a92 204 raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
d5ed4c2e
MD
205}
206
42752cc6 207static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
d5ed4c2e 208{
f92d62f5
LP
209 unsigned long periodic;
210 unsigned long rate;
d5ed4c2e
MD
211 int ret;
212
42752cc6
LP
213 pm_runtime_get_sync(&ch->mtu->pdev->dev);
214 dev_pm_syscore_device(&ch->mtu->pdev->dev, true);
3cb6f10a 215
d5ed4c2e 216 /* enable clock */
42752cc6 217 ret = clk_enable(ch->mtu->clk);
d5ed4c2e 218 if (ret) {
d2b93177
LP
219 dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
220 ch->index);
d5ed4c2e
MD
221 return ret;
222 }
223
224 /* make sure channel is disabled */
42752cc6 225 sh_mtu2_start_stop_ch(ch, 0);
d5ed4c2e 226
42752cc6 227 rate = clk_get_rate(ch->mtu->clk) / 64;
f92d62f5 228 periodic = (rate + HZ/2) / HZ;
d5ed4c2e 229
f992c241
LP
230 /*
231 * "Periodic Counter Operation"
232 * Clear on TGRA compare match, divide clock by 64.
233 */
234 sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
235 sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
236 TIOC_IOCL(TIOR_OC_0_CLEAR));
42752cc6
LP
237 sh_mtu2_write(ch, TGR, periodic);
238 sh_mtu2_write(ch, TCNT, 0);
f992c241
LP
239 sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
240 sh_mtu2_write(ch, TIER, TIER_TGIEA);
d5ed4c2e
MD
241
242 /* enable channel */
42752cc6 243 sh_mtu2_start_stop_ch(ch, 1);
d5ed4c2e
MD
244
245 return 0;
246}
247
42752cc6 248static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
d5ed4c2e
MD
249{
250 /* disable channel */
42752cc6 251 sh_mtu2_start_stop_ch(ch, 0);
d5ed4c2e
MD
252
253 /* stop clock */
42752cc6 254 clk_disable(ch->mtu->clk);
3cb6f10a 255
42752cc6
LP
256 dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
257 pm_runtime_put(&ch->mtu->pdev->dev);
d5ed4c2e
MD
258}
259
260static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
261{
42752cc6 262 struct sh_mtu2_channel *ch = dev_id;
d5ed4c2e
MD
263
264 /* acknowledge interrupt */
42752cc6 265 sh_mtu2_read(ch, TSR);
f992c241 266 sh_mtu2_write(ch, TSR, ~TSR_TGFA);
d5ed4c2e
MD
267
268 /* notify clockevent layer */
42752cc6 269 ch->ced.event_handler(&ch->ced);
d5ed4c2e
MD
270 return IRQ_HANDLED;
271}
272
42752cc6 273static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
d5ed4c2e 274{
42752cc6 275 return container_of(ced, struct sh_mtu2_channel, ced);
d5ed4c2e
MD
276}
277
/*
 * Clockevent mode transition callback.
 *
 * First tears down the previous mode (disabling the channel if it was
 * running periodically), then applies the new one. The 'disabled' flag
 * prevents a double sh_mtu2_disable() when going PERIODIC -> UNUSED.
 */
static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
				     struct clock_event_device *ced)
{
	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		sh_mtu2_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->mtu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_mtu2_enable(ch);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		/* Only disable if the old-mode teardown didn't already. */
		if (!disabled)
			sh_mtu2_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}
309
cc7ad456
RW
310static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
311{
42752cc6 312 pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
cc7ad456
RW
313}
314
315static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
316{
42752cc6 317 pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
cc7ad456
RW
318}
319
42752cc6 320static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
207e21a9 321 const char *name)
d5ed4c2e 322{
42752cc6 323 struct clock_event_device *ced = &ch->ced;
d5ed4c2e 324
d5ed4c2e
MD
325 ced->name = name;
326 ced->features = CLOCK_EVT_FEAT_PERIODIC;
207e21a9 327 ced->rating = 200;
3cc95047 328 ced->cpumask = cpu_possible_mask;
d5ed4c2e 329 ced->set_mode = sh_mtu2_clock_event_mode;
cc7ad456
RW
330 ced->suspend = sh_mtu2_clock_event_suspend;
331 ced->resume = sh_mtu2_clock_event_resume;
d5ed4c2e 332
d2b93177
LP
333 dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
334 ch->index);
da64c2a8 335 clockevents_register_device(ced);
d5ed4c2e
MD
336}
337
1a5da0e4 338static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
d5ed4c2e 339{
1a5da0e4
LP
340 ch->mtu->has_clockevent = true;
341 sh_mtu2_register_clockevent(ch, name);
d5ed4c2e
MD
342
343 return 0;
344}
345
faf3f4f8 346static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
2e1a5326
LP
347 struct sh_mtu2_device *mtu)
348{
faf3f4f8
LP
349 static const unsigned int channel_offsets[] = {
350 0x300, 0x380, 0x000,
351 };
1a5da0e4
LP
352 char name[6];
353 int irq;
354 int ret;
2e1a5326 355
2e1a5326
LP
356 ch->mtu = mtu;
357
1a5da0e4
LP
358 sprintf(name, "tgi%ua", index);
359 irq = platform_get_irq_byname(mtu->pdev, name);
360 if (irq < 0) {
faf3f4f8 361 /* Skip channels with no declared interrupt. */
1a5da0e4
LP
362 return 0;
363 }
faf3f4f8 364
1a5da0e4
LP
365 ret = request_irq(irq, sh_mtu2_interrupt,
366 IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
367 dev_name(&ch->mtu->pdev->dev), ch);
368 if (ret) {
369 dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
370 index, irq);
371 return ret;
2e1a5326
LP
372 }
373
1a5da0e4
LP
374 ch->base = mtu->mapbase + channel_offsets[index];
375 ch->index = index;
376
377 return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
2e1a5326
LP
378}
379
faf3f4f8 380static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
d5ed4c2e 381{
d5ed4c2e 382 struct resource *res;
d5ed4c2e 383
7dad72de 384 res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
d5ed4c2e 385 if (!res) {
7dad72de 386 dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
faf3f4f8 387 return -ENXIO;
d5ed4c2e
MD
388 }
389
faf3f4f8
LP
390 mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
391 if (mtu->mapbase == NULL)
392 return -ENXIO;
393
faf3f4f8
LP
394 return 0;
395}
396
faf3f4f8
LP
397static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
398 struct platform_device *pdev)
399{
faf3f4f8
LP
400 unsigned int i;
401 int ret;
402
403 mtu->pdev = pdev;
da90a1c6 404
faf3f4f8 405 /* Get hold of clock. */
1a5da0e4 406 mtu->clk = clk_get(&mtu->pdev->dev, "fck");
7dad72de
LP
407 if (IS_ERR(mtu->clk)) {
408 dev_err(&mtu->pdev->dev, "cannot get clock\n");
faf3f4f8 409 return PTR_ERR(mtu->clk);
d5ed4c2e
MD
410 }
411
7dad72de 412 ret = clk_prepare(mtu->clk);
bd754930 413 if (ret < 0)
faf3f4f8 414 goto err_clk_put;
bd754930 415
faf3f4f8
LP
416 /* Map the memory resource. */
417 ret = sh_mtu2_map_memory(mtu);
418 if (ret < 0) {
419 dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
420 goto err_clk_unprepare;
421 }
422
423 /* Allocate and setup the channels. */
1a5da0e4 424 mtu->num_channels = 3;
faf3f4f8
LP
425
426 mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels,
427 GFP_KERNEL);
c54ccb43
LP
428 if (mtu->channels == NULL) {
429 ret = -ENOMEM;
faf3f4f8 430 goto err_unmap;
c54ccb43
LP
431 }
432
1a5da0e4
LP
433 for (i = 0; i < mtu->num_channels; ++i) {
434 ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
faf3f4f8
LP
435 if (ret < 0)
436 goto err_unmap;
faf3f4f8 437 }
c54ccb43 438
faf3f4f8 439 platform_set_drvdata(pdev, mtu);
a4a5fc3b
LP
440
441 return 0;
faf3f4f8
LP
442
443err_unmap:
c54ccb43 444 kfree(mtu->channels);
1a5da0e4 445 iounmap(mtu->mapbase);
faf3f4f8 446err_clk_unprepare:
7dad72de 447 clk_unprepare(mtu->clk);
faf3f4f8 448err_clk_put:
7dad72de 449 clk_put(mtu->clk);
d5ed4c2e
MD
450 return ret;
451}
452
/*
 * Platform driver probe.
 *
 * The driver can be probed twice: once very early via the "earlytimer"
 * early platform mechanism (before runtime PM is available), and again
 * as a regular platform device. On the second probe, drvdata set by the
 * first pass is detected and the device is kept as the earlytimer
 * instance; only the runtime PM state is finalized.
 */
static int sh_mtu2_probe(struct platform_device *pdev)
{
	struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
	int ret;

	/* Runtime PM is only usable on the regular (non-early) probe. */
	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* Non-NULL drvdata means the early probe already set us up. */
	if (mtu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
	if (mtu == NULL)
		return -ENOMEM;

	ret = sh_mtu2_setup(mtu, pdev);
	if (ret) {
		kfree(mtu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	/* The early pass stops here; PM bookkeeping happens on reprobe. */
	if (is_early_platform_device(pdev))
		return 0;

 out:
	/*
	 * A registered clockevent may be programmed from hard IRQ context,
	 * so runtime PM must be marked IRQ-safe; otherwise let the device
	 * idle.
	 */
	if (mtu->has_clockevent)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
489
/*
 * Removal is refused: a registered clockevent device cannot be
 * unregistered from the clockevent core, so the driver must stay bound.
 */
static int sh_mtu2_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent */
}
494
/* Platform device IDs this driver binds to. */
static const struct platform_device_id sh_mtu2_id_table[] = {
	{ "sh-mtu2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);
500
d5ed4c2e
MD
/* Platform driver glue; matches by name and by the ID table above. */
static struct platform_driver sh_mtu2_device_driver = {
	.probe		= sh_mtu2_probe,
	.remove		= sh_mtu2_remove,
	.driver		= {
		.name	= "sh_mtu2",
	},
	.id_table	= sh_mtu2_id_table,
};
509
510static int __init sh_mtu2_init(void)
511{
512 return platform_driver_register(&sh_mtu2_device_driver);
513}
514
515static void __exit sh_mtu2_exit(void)
516{
517 platform_driver_unregister(&sh_mtu2_device_driver);
518}
519
/*
 * Register for the early "earlytimer" probe pass (runs before normal
 * driver initialization) and again at subsys_initcall time for the
 * regular platform device probe.
 */
early_platform_init("earlytimer", &sh_mtu2_device_driver);
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");
This page took 0.33136 seconds and 5 git commands to generate.