/*
 * File:         arch/blackfin/mach-common/interrupt.S
 * Based on:
 * Author:       D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
 *               Kenneth Albanowski <kjahds@kjahds.com>
 *
 * Created:      ?
 * Description:  Interrupt Entries
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <asm/blackfin.h>
#include <mach/irq.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>

#include <asm/context.S>

.extern _ret_from_exception

#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif

	.align 4	/* just in case */

/* Common interrupt entry code.  First we do CLI, then push
 * RETI, to keep interrupts disabled, but to allow this state to be changed
 * by local_bh_enable.
 * R0 contains the interrupt number, while R1 may contain the value of IPEND,
 * or garbage if IPEND won't be needed by the ISR.  */
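/* The pushes below, together with the registers already saved by the entry
 * macros used further down before they jump here, lay down what amounts to a
 * struct pt_regs frame on the supervisor stack; the PT_* offsets used later
 * and RESTORE_CONTEXT depend on this layout, so the save order is not
 * arbitrary.
 */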
__common_int_entry:
	[--sp] = fp;
	[--sp] = usp;

	[--sp] = i0;
	[--sp] = i1;
	[--sp] = i2;
	[--sp] = i3;

	[--sp] = m0;
	[--sp] = m1;
	[--sp] = m2;
	[--sp] = m3;

	[--sp] = l0;
	[--sp] = l1;
	[--sp] = l2;
	[--sp] = l3;

	[--sp] = b0;
	[--sp] = b1;
	[--sp] = b2;
	[--sp] = b3;
	[--sp] = a0.x;
	[--sp] = a0.w;
	[--sp] = a1.x;
	[--sp] = a1.w;

	[--sp] = LC0;
	[--sp] = LC1;
	[--sp] = LT0;
	[--sp] = LT1;
	[--sp] = LB0;
	[--sp] = LB1;

	[--sp] = ASTAT;

	[--sp] = r0;	/* Skip reserved */
	[--sp] = RETS;
	r2 = RETI;
	[--sp] = r2;
	[--sp] = RETX;
	[--sp] = RETN;
	[--sp] = RETE;
	[--sp] = SEQSTAT;
	[--sp] = r1;	/* IPEND - R1 may or may not be set up before jumping here. */

	/* Switch to other method of keeping interrupts disabled. */
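	/* With CONFIG_DEBUG_HWERR we do not mask everything: writing 0x3f to
	 * IMASK keeps the hardware error event (IVHW) enabled, so bus/system
	 * errors are reported as close as possible to the instruction that
	 * caused them, at the cost of a little extra overhead.
	 */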
#ifdef CONFIG_DEBUG_HWERR
	r1 = 0x3f;
	sti r1;
#else
	cli r1;
#endif
	[--sp] = RETI;	/* orig_pc */
	/* Clear all L registers.  */
	r1 = 0 (x);
	l0 = r1;
	l1 = r1;
	l2 = r1;
	l3 = r1;
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

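	/* Anomaly 05000283/05000315 workaround: CC is forced true, so the jump
	 * is always taken at runtime, but (lacking a (bp) hint) it is predicted
	 * not taken - the CHIPID MMR read below is therefore started
	 * speculatively and then killed, which appears to be what the
	 * workaround relies on.  See the anomaly sheets for details.
	 */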
#if ANOMALY_05000283 || ANOMALY_05000315
	cc = r7 == r7;
	p5.h = HI(CHIPID);
	p5.l = LO(CHIPID);
	if cc jump 1f;
	r7.l = W[p5];
1:
#endif
	r1 = sp;
	SP += -12;
#ifdef CONFIG_IPIPE
	call ___ipipe_grab_irq
	SP += 12;
	cc = r0 == 0;
	if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */
	call _do_irq;
	SP += 12;
#endif /* CONFIG_IPIPE */
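	/* _return_from_int (see entry.S) is expected to take care of any work
	 * that became pending while the IRQ was being handled (reschedule,
	 * signal delivery) before we fall through and unwind the frame.
	 */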
	call _return_from_int;
.Lcommon_restore_context:
	RESTORE_CONTEXT
	rti;

/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
	/* A single action can kick off multiple memory transactions (a cache
	 * line fetch, for example), which in turn can raise multiple hardware
	 * errors - so let's catch them all.  First make sure all the actions
	 * have completed and the core actually sees the hardware errors.
	 */
	SSYNC;
	SSYNC;

	SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

#if ANOMALY_05000283 || ANOMALY_05000315
	cc = r7 == r7;
	p5.h = HI(CHIPID);
	p5.l = LO(CHIPID);
	if cc jump 1f;
	r7.l = W[p5];
1:
#endif

	/* Handle all stacked hardware errors
	 * To make sure we don't hang forever, only do it 10 times
	 */
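	/* R0 counts how many pending IVHW latches we have acknowledged so far,
	 * R2 caps the loop; R1 holds the ILAT contents and then the bit we
	 * write back to acknowledge the error.
	 */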
	R0 = 0;
	R2 = 10;
1:
	P0.L = LO(ILAT);
	P0.H = HI(ILAT);
	R1 = [P0];
	CC = BITTST(R1, EVT_IVHW_P);
	IF ! CC JUMP 2f;
	/* OK a hardware error is pending - clear it */
	R1 = EVT_IVHW_P;
	[P0] = R1;
	R0 += 1;
	CC = R0 == R2;	/* give up after 10 passes */
	if CC JUMP 2f;
	JUMP 1b;
2:
# We are going to dump something out, so make sure we print IPEND properly
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	[sp + PT_IPEND] = r0;

	/* set the EXCAUSE to HWERR for trap_c */
	r0 = [sp + PT_SEQSTAT];
	R1.L = LO(VEC_HWERR);
	R1.H = HI(VEC_HWERR);
	R0 = R0 | R1;
	[sp + PT_SEQSTAT] = R0;

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	call _trap_c;
	SP += 12;

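	/* EBIU_ERRMST only exists on some parts (hence the #ifdef on the MMR
	 * macro).  Clearing the core error bits here presumably re-arms the
	 * EBIU so the next bus error gets latched and reported.
	 */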
#ifdef EBIU_ERRMST
	/* make sure EBIU_ERRMST is clear */
	p0.l = LO(EBIU_ERRMST);
	p0.h = HI(EBIU_ERRMST);
	r0.l = (CORE_ERROR | CORE_MERROR);
	w[p0] = r0.l;
#endif

	call _ret_from_exception;

.Lcommon_restore_all_sys:
	RESTORE_ALL_SYS
	rti;
ENDPROC(_evt_ivhw)

/* Interrupt routine for evt2 (NMI).
 * We don't actually use this, so just return.
 * For inner circle type details, please see:
 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
 */
ENTRY(_evt_nmi)
.weak _evt_nmi
	rtn;
ENDPROC(_evt_nmi)

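/* The handlers below are thin trampolines: TIMER_INTERRUPT_ENTRY and
 * INTERRUPT_ENTRY (from asm/entry.h) save the initial register state, load
 * R0 with the event number and jump to __common_int_entry above - which is
 * why no ENDPROC or explicit return appears here.
 */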
/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
	TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)

/* interrupt routine for evt7 - 7 */
ENTRY(_evt_evt7)
	INTERRUPT_ENTRY(EVT_IVG7_P)
ENTRY(_evt_evt8)
	INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
	INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
	INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
	INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
	INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
	INTERRUPT_ENTRY(EVT_IVG13_P)


/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
	SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif
	call _system_call;
	jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)

#ifdef CONFIG_IPIPE
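/* ___ipipe_call_irqtail: invoke the routine passed in R0, then bounce through
 * EVT14 so the I-pipe IRQ tail work runs at that low core priority.  The
 * EVT14 vector is temporarily pointed at the local "3:" stub and IRQ 14 is
 * raised; the stub then restores the regular _evt14_softirq vector and the
 * IMASK value cached in _bfin_irq_flags before returning.  (A reading of the
 * code below; see the I-pipe documentation for the authoritative story.)
 */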
ENTRY(___ipipe_call_irqtail)
	p0 = r0;
	r0.l = 1f;
	r0.h = 1f;
	reti = r0;
	rti;
1:
	[--sp] = rets;
	[--sp] = ( r7:4, p5:3 );
	sp += -12;
	call (p0);
	sp += 12;
	( r7:4, p5:3 ) = [sp++];
	rets = [sp++];

	[--sp] = reti;
	reti = [sp++];	/* IRQs are off. */
	r0.h = 3f;
	r0.l = 3f;
	p0.l = lo(EVT14);
	p0.h = hi(EVT14);
	[p0] = r0;
	csync;
	r0 = 0x401f (z);
	sti r0;
	raise 14;
	[--sp] = reti;	/* IRQs on. */
2:
	jump 2b;	/* Likely paranoid. */
3:
	sp += 4;	/* Discard saved RETI */
	r0.h = _evt14_softirq;
	r0.l = _evt14_softirq;
	p0.l = lo(EVT14);
	p0.h = hi(EVT14);
	[p0] = r0;
	csync;
	p0.l = _bfin_irq_flags;
	p0.h = _bfin_irq_flags;
	r0 = [p0];
	sti r0;
	rts;
ENDPROC(___ipipe_call_irqtail)

#endif /* CONFIG_IPIPE */