ARM: local timers: Add A15 architected timer support
[deliverable/linux.git] / arch/arm/kernel/arch_timer.c
/*
 * linux/arch/arm/kernel/arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/localtimer.h>
#include <asm/arch_timer.h>
#include <asm/system_info.h>

static unsigned long arch_timer_rate;
static int arch_timer_ppi;
static int arch_timer_ppi2;

static struct clock_event_device __percpu **arch_timer_evt;

/*
 * Architected system timer support.
 */

#define ARCH_TIMER_CTRL_ENABLE		(1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK		(1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT		(1 << 2)

#define ARCH_TIMER_REG_CTRL		0
#define ARCH_TIMER_REG_FREQ		1
#define ARCH_TIMER_REG_TVAL		2

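/*
 * Low-level accessors for the per-CPU physical timer. In the CP15 c14
 * encoding space, CNTP_CTL is c14, c2, 1, CNTP_TVAL is c14, c2, 0 and
 * CNTFRQ is c14, c0, 0. Writes are followed by an isb() so the new
 * timer state takes effect before the caller continues.
 */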
static void arch_timer_reg_write(int reg, u32 val)
{
	switch (reg) {
	case ARCH_TIMER_REG_CTRL:
		asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
		break;
	case ARCH_TIMER_REG_TVAL:
		asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
		break;
	}

	isb();
}

static u32 arch_timer_reg_read(int reg)
{
	u32 val;

	switch (reg) {
	case ARCH_TIMER_REG_CTRL:
		asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
		break;
	case ARCH_TIMER_REG_FREQ:
		asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
		break;
	case ARCH_TIMER_REG_TVAL:
		asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
		break;
	default:
		BUG();
	}

	return val;
}

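/*
 * Per-CPU interrupt handler. The timer PPI fires once the downcounter
 * expires with the timer enabled and unmasked; if ISTATUS is set we
 * mask the output so it does not fire again until reprogrammed and hand
 * off to the clockevent core. Anything else is treated as spurious.
 */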
static irqreturn_t arch_timer_handler(int irq, void *dev_id)
{
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

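/* Stop the timer by clearing the enable bit in CNTP_CTL. */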
static void arch_timer_disable(void)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
}

static void arch_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *clk)
{
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		arch_timer_disable();
		break;
	default:
		break;
	}
}

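/*
 * Program the next event: load the downcounter with the requested
 * number of ticks, then enable the timer with its interrupt unmasked.
 * The clockevent layer only calls this in oneshot mode.
 */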
static int arch_timer_set_next_event(unsigned long evt,
				     struct clock_event_device *unused)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);

	return 0;
}

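/*
 * Called through the local timer framework on each CPU as it comes
 * online: register a high-rating oneshot clockevent for this CPU and
 * enable the timer PPI(s) at the interrupt controller.
 */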
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	/* Be safe... */
	arch_timer_disable();

	clk->features = CLOCK_EVT_FEAT_ONESHOT;
	clk->name = "arch_sys_timer";
	clk->rating = 450;
	clk->set_mode = arch_timer_set_mode;
	clk->set_next_event = arch_timer_set_next_event;
	clk->irq = arch_timer_ppi;

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	*__this_cpu_ptr(arch_timer_evt) = clk;

	enable_percpu_irq(clk->irq, 0);
	if (arch_timer_ppi2)
		enable_percpu_irq(arch_timer_ppi2, 0);

	return 0;
}

/* Is the optional system timer available? */
static int local_timer_is_architected(void)
{
	return (cpu_architecture() >= CPU_ARCH_ARMv7) &&
	       ((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1;
}

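/*
 * Probe the timer: the Generic Timer field of ID_PFR1 (bits [19:16])
 * must read 1, and CNTFRQ must have been programmed with the counter
 * frequency by the boot firmware, otherwise there is nothing usable to
 * drive the clockevents with.
 */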
static int arch_timer_available(void)
{
	unsigned long freq;

	if (!local_timer_is_architected())
		return -ENXIO;

	if (arch_timer_rate == 0) {
		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
		freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);

		/* Check the timer frequency. */
		if (freq == 0) {
			pr_warn("Architected timer frequency not available\n");
			return -EINVAL;
		}

		arch_timer_rate = freq;
	}

	pr_info_once("Architected local timer running at %lu.%02luMHz.\n",
		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
	return 0;
}

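/*
 * 64-bit counter accessors: CNTPCT (physical) and CNTVCT (virtual) are
 * read with a single mrrc so the high and low halves are consistent.
 */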
static inline cycle_t arch_counter_get_cntpct(void)
{
	u32 cvall, cvalh;

	asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));

	return ((cycle_t) cvalh << 32) | cvall;
}

static inline cycle_t arch_counter_get_cntvct(void)
{
	u32 cvall, cvalh;

	asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));

	return ((cycle_t) cvalh << 32) | cvall;
}

static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_counter_get_cntpct();
}

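/*
 * The always-on physical counter is also exposed to the timekeeping
 * core as a clocksource with a high rating (400).
 */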
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

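/*
 * CPU is going down: disable its timer PPI(s) and shut the clockevent
 * down so no further events fire on this core.
 */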
static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());
	disable_percpu_irq(clk->irq);
	if (arch_timer_ppi2)
		disable_percpu_irq(arch_timer_ppi2);
	arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

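/* Hotplug callbacks handed to the generic local timer code. */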
static struct local_timer_ops arch_timer_ops __cpuinitdata = {
	.setup	= arch_timer_setup,
	.stop	= arch_timer_stop,
};

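/*
 * Entry point for platform code, which passes the timer PPI(s) as IRQ
 * resources: validate them, register the counter clocksource, request
 * the per-CPU interrupts and finally hook into the local timer layer,
 * which will invoke arch_timer_setup() on each CPU.
 */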
int __init arch_timer_register(struct arch_timer *at)
{
	int err;

	if (at->res[0].start <= 0 || !(at->res[0].flags & IORESOURCE_IRQ))
		return -EINVAL;

	err = arch_timer_available();
	if (err)
		return err;

	arch_timer_evt = alloc_percpu(struct clock_event_device *);
	if (!arch_timer_evt)
		return -ENOMEM;

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);

	arch_timer_ppi = at->res[0].start;
	err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
				 "arch_timer", arch_timer_evt);
	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       arch_timer_ppi, err);
		goto out_free;
	}

	if (at->res[1].start > 0 || (at->res[1].flags & IORESOURCE_IRQ)) {
		arch_timer_ppi2 = at->res[1].start;
		err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler,
					 "arch_timer", arch_timer_evt);
		if (err) {
			pr_err("arch_timer: can't register interrupt %d (%d)\n",
			       arch_timer_ppi2, err);
			arch_timer_ppi2 = 0;
			goto out_free_irq;
		}
	}

	err = local_timer_register(&arch_timer_ops);
	if (err)
		goto out_free_irq;

	return 0;

out_free_irq:
	free_percpu_irq(arch_timer_ppi, arch_timer_evt);
	if (arch_timer_ppi2)
		free_percpu_irq(arch_timer_ppi2, arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);

	return err;
}