[BLACKFIN] minor irq handler cleanups
arch/blackfin/kernel/time.c
/*
 * File:         arch/blackfin/kernel/time.c
 * Based on:     none - original work
 * Author:
 *
 * Created:
 * Description:  This file contains the bfin-specific time handling details.
 *               Most of the stuff is located in the machine specific files.
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/irq.h>

#include <asm/blackfin.h>

/* This is an NTP setting */
#define TICK_SIZE (tick_nsec / 1000)

static void time_sched_init(irq_handler_t timer_routine);
static unsigned long gettimeoffset(void);

static struct irqaction bfin_timer_irq = {
	.name = "BFIN Timer Tick",
	.flags = IRQF_DISABLED
};

/*
 * The way that the Blackfin core timer works is:
 *  - CCLK is divided by a programmable 8-bit pre-scaler (TSCALE)
 *  - every time TSCALE ticks, a 32-bit counter (TCOUNT) is decremented
 *
 * If you take the fastest clock (1 ns, or 1 GHz to make the math easier),
 * 10 ms is 10,000,000 clock ticks, which fits easily into a 32-bit counter
 * (a 32-bit counter holds 4,294,967,296 ns, or about 4.2 seconds), so we
 * don't need TSCALE and program it to zero (which passes CCLK through
 * undivided).  If you do want to use it, try to keep HZ * TIME_SCALE at a
 * value that divides evenly (like a power of 2).
 */

#define TIME_SCALE 1

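/*
 * Worked example (illustrative numbers only, not something the code requires):
 * with a hypothetical 500 MHz CCLK, HZ = 100 and TIME_SCALE = 1, the reload
 * value computed in time_sched_init() below is
 *
 *     get_cclk() / (HZ * TIME_SCALE) - 1 = 500,000,000 / 100 - 1 = 4,999,999
 *
 * so TCOUNT counts down 5,000,000 CCLK cycles (10 ms) between core timer
 * interrupts, comfortably within the 32-bit range discussed above.
 */
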
static void
time_sched_init(irq_handler_t timer_routine)
{
	u32 tcount;

	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(1);
	CSYNC();

	/*
	 * program the TSCALE prescaler; the register holds the divider
	 * minus one, so TIME_SCALE of 1 writes 0 and passes CCLK straight
	 * through
	 */
	bfin_write_TSCALE((TIME_SCALE - 1));

	tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
	bfin_write_TPERIOD(tcount);
	bfin_write_TCOUNT(tcount);

	/* now enable the timer */
	CSYNC();

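	/*
	 * Note (added for clarity; based on the documented core timer TCNTL
	 * bit layout): the value 7 below sets TMPWR | TMREN | TAUTORLD, so
	 * the timer stays powered, starts running, and automatically reloads
	 * TCOUNT from TPERIOD each time it expires.
	 */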
	bfin_write_TCNTL(7);

	bfin_timer_irq.handler = (irq_handler_t)timer_routine;
	/* call setup_irq instead of request_irq because request_irq calls
	 * kmalloc which has not been initialized yet
	 */
	setup_irq(IRQ_CORETMR, &bfin_timer_irq);
}

/*
 * Should return useconds since last timer tick
 */
static unsigned long gettimeoffset(void)
{
	unsigned long offset;
	unsigned long clocks_per_jiffy;

	clocks_per_jiffy = bfin_read_TPERIOD();
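	/*
	 * TCOUNT counts down from TPERIOD, so (TPERIOD - TCOUNT) is the number
	 * of core clocks elapsed since the last tick; dividing by the clocks
	 * per microsecond, (TPERIOD + 1) * HZ / USEC_PER_SEC, converts that
	 * into microseconds.
	 */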
	offset = (clocks_per_jiffy - bfin_read_TCOUNT()) /
		 (((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);

	/* Check if we just wrapped the counters and maybe missed a tick */
	if ((bfin_read_ILAT() & (1 << IRQ_CORETMR))
	    && (offset < (100000 / HZ / 2)))
		offset += (USEC_PER_SEC / HZ);

	return offset;
}

static inline int set_rtc_mmss(unsigned long nowtime)
{
	return 0;
}

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clock tick
 */
#ifdef CONFIG_CORE_TIMER_IRQ_L1
irqreturn_t timer_interrupt(int irq, void *dummy) __attribute__((l1_text));
#endif

irqreturn_t timer_interrupt(int irq, void *dummy)
{
	/* last time the cmos clock got updated */
	static long last_rtc_update;

	write_seqlock(&xtime_lock);

	do_timer(1);

	profile_tick(CPU_PROFILING);

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * the CMOS clock accordingly every ~11 minutes.  set_rtc_mmss() has
	 * to be called as close as possible to 500 ms before the new second
	 * starts.
	 */
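	/*
	 * Concretely: "+ 660" spaces the writes about 11 minutes apart, and
	 * the tv_nsec window below accepts only the tick whose microsecond
	 * value lies within half a tick of 500 ms, i.e. the tick closest to
	 * the middle of the current second.
	 */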
	if (ntp_synced() &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / NSEC_PER_USEC) >=
	        500000 - ((unsigned)TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / NSEC_PER_USEC) <=
	        500000 + ((unsigned)TICK_SIZE) / 2) {
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			/* Do it again in 60 s. */
			last_rtc_update = xtime.tv_sec - 600;
	}
	write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif

	return IRQ_HANDLED;
}

void __init time_init(void)
{
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60;	/* 1 Jan 2007 */

#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	/* Initialize xtime.  From now on, xtime is updated with timer interrupts */
	xtime.tv_sec = secs_since_1970;
	xtime.tv_nsec = 0;

	wall_to_monotonic.tv_sec = -xtime.tv_sec;

	time_sched_init(timer_interrupt);
}

#ifndef CONFIG_GENERIC_TIME
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned long usec, sec;

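	/*
	 * Classic seqlock read pattern: sample xtime and the tick offset, and
	 * retry if a writer (the timer interrupt or settimeofday) updated
	 * xtime while we were reading.
	 */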
	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = gettimeoffset();
		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / NSEC_PER_USEC);
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	while (usec >= USEC_PER_SEC) {
		usec -= USEC_PER_SEC;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
EXPORT_SYMBOL(do_gettimeofday);

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting.  We need to set xtime.tv_nsec correctly.
	 * However, the value stored there is the value at the last tick.
	 * Discover what correction gettimeofday() would have applied,
	 * and then undo it!
	 */
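	/*
	 * In other words: xtime still holds the time of the last tick, and
	 * gettimeoffset() reports the microseconds elapsed since then, so
	 * subtracting that offset here makes the stored time plus the offset
	 * equal the time being requested.
	 */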
	nsec -= (gettimeoffset() * NSEC_PER_USEC);

	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
#endif	/* !CONFIG_GENERIC_TIME */

/*
 * Scheduler clock - returns current time in nanosec units.
 */
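/*
 * Note: being jiffies-based, this only advances once per timer tick, so its
 * effective resolution is 1/HZ (e.g. 10 ms at HZ = 100) even though the unit
 * is nanoseconds.
 */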
unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}