/* -*- mode: asm -*-
 *
 *  linux/arch/h8300/kernel/entry.S
 *
 *  Yoshinori Sato <ysato@users.sourceforge.jp>
 *  David McCullough <davidm@snapgear.com>
 *
 */

/*
 * entry.S
 * exception/interrupt gateways and the system call entry
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/errno.h>

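/*
 * CPU-variant helpers: shifts, bulk register save/restore and EXR
 * handling differ between the H8/300H and H8S cores, so they are
 * wrapped in macros and selected at build time below.
 */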
#if defined(CONFIG_CPU_H8300H)
#define USERRET 8
INTERRUPTS = 64
	.h8300h
	.macro	SHLL2 reg
	shll.l	\reg
	shll.l	\reg
	.endm
	.macro	SHLR2 reg
	shlr.l	\reg
	shlr.l	\reg
	.endm
	.macro	SAVEREGS
	mov.l	er0,@-sp
	mov.l	er1,@-sp
	mov.l	er2,@-sp
	mov.l	er3,@-sp
	.endm
	.macro	RESTOREREGS
	mov.l	@sp+,er3
	mov.l	@sp+,er2
	.endm
	.macro	SAVEEXR
	.endm
	.macro	RESTOREEXR
	.endm
#endif
#if defined(CONFIG_CPU_H8S)
#define USERRET 10
#define USEREXR 8
INTERRUPTS = 128
	.h8300s
	.macro	SHLL2 reg
	shll.l	#2,\reg
	.endm
	.macro	SHLR2 reg
	shlr.l	#2,\reg
	.endm
	.macro	SAVEREGS
	stm.l	er0-er3,@-sp
	.endm
	.macro	RESTOREREGS
	ldm.l	@sp+,er2-er3
	.endm
	.macro	SAVEEXR
	mov.w	@(USEREXR:16,er0),r1
	mov.w	r1,@(LEXR-LER3:16,sp)		/* copy EXR */
	.endm
	.macro	RESTOREEXR
	mov.w	@(LEXR-LER1:16,sp),r1		/* restore EXR */
	mov.b	r1l,r1h
	mov.w	r1,@(USEREXR:16,er0)
	.endm
#endif


/* CPU context save/restore macros. */

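/*
 * SAVE_ALL builds a struct pt_regs frame on the kernel stack; the
 * L* offsets used below (LER0..LRET, LCCR, LORIG, LVEC) come from
 * asm-offsets.
 */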
	.macro	SAVE_ALL
	mov.l	er0,@-sp
	stc	ccr,r0l				/* check kernel mode */
	btst	#4,r0l
	bne	5f

	/* user mode */
	mov.l	sp,@SYMBOL_NAME(sw_usp)
	mov.l	@sp,er0				/* restore saved er0 */
	orc	#0x10,ccr			/* switch kernel stack */
	mov.l	@SYMBOL_NAME(sw_ksp),sp
	sub.l	#(LRET-LORIG),sp		/* allocate the LORIG..LRET frame slots */
	SAVEREGS
	mov.l	@SYMBOL_NAME(sw_usp),er0
	mov.l	@(USERRET:16,er0),er1		/* copy the RET addr */
	mov.l	er1,@(LRET-LER3:16,sp)
	SAVEEXR

	mov.l	@(LORIG-LER3:16,sp),er0
	mov.l	er0,@(LER0-LER3:16,sp)		/* copy ER0 */
	mov.w	e1,r1				/* e1 high byte = ccr */
	and	#0xef,r1h			/* clear the kernel-mode flag */
	bra	6f
5:
	/* kernel mode */
	mov.l	@sp,er0				/* restore saved er0 */
	subs	#2,sp				/* set dummy ccr */
	SAVEREGS
	mov.w	@(LRET-LER3:16,sp),r1		/* copy old ccr */
6:
	mov.b	r1h,r1l
	mov.b	#0,r1h
	mov.w	r1,@(LCCR-LER3:16,sp)		/* set ccr */
	mov.l	er6,@-sp			/* syscall arg #6 */
	mov.l	er5,@-sp			/* syscall arg #5 */
	mov.l	er4,@-sp			/* syscall arg #4 */
	.endm					/* r1 = ccr */

	.macro	RESTORE_ALL
	mov.l	@sp+,er4
	mov.l	@sp+,er5
	mov.l	@sp+,er6
	RESTOREREGS
	mov.w	@(LCCR-LER1:16,sp),r0		/* check kernel mode */
	btst	#4,r0l
	bne	7f

	orc	#0x80,ccr
	mov.l	@SYMBOL_NAME(sw_usp),er0
	mov.l	@(LER0-LER1:16,sp),er1		/* restore ER0 */
	mov.l	er1,@er0
	RESTOREEXR
	mov.w	@(LCCR-LER1:16,sp),r1		/* restore the RET addr */
	mov.b	r1l,r1h
	mov.b	@(LRET+1-LER1:16,sp),r1l
	mov.w	r1,e1
	mov.w	@(LRET+2-LER1:16,sp),r1
	mov.l	er1,@(USERRET:16,er0)

	mov.l	@sp+,er1
	add.l	#(LRET-LER1),sp			/* pop the LORIG..LRET frame slots */
	mov.l	sp,@SYMBOL_NAME(sw_ksp)
	andc	#0xef,ccr			/* switch to user mode */
	mov.l	er0,sp
	bra	8f
7:
	mov.l	@sp+,er1
	adds	#4,sp
	adds	#2,sp
8:
	mov.l	@sp+,er0
	adds	#4,sp				/* remove the software-created LVEC */
	rte
	.endm

	.globl SYMBOL_NAME(system_call)
	.globl SYMBOL_NAME(ret_from_exception)
	.globl SYMBOL_NAME(ret_from_fork)
	.globl SYMBOL_NAME(ret_from_interrupt)
	.globl SYMBOL_NAME(interrupt_redirect_table)
	.globl SYMBOL_NAME(sw_ksp),SYMBOL_NAME(sw_usp)
	.globl SYMBOL_NAME(resume)
	.globl SYMBOL_NAME(interrupt_entry)
	.globl SYMBOL_NAME(trace_break)

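/*
 * For ROM kernels the hardware vectors land in this redirect table,
 * one four-byte jsr/jmp slot per vector; for RAM kernels only a
 * pointer to the run-time redirect table is kept in .bss.
 */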
#if defined(CONFIG_ROMKERNEL)
	.section .int_redirect,"ax"
SYMBOL_NAME_LABEL(interrupt_redirect_table)
#if defined(CONFIG_CPU_H8300H)
	.rept	7
	.long	0
	.endr
#endif
#if defined(CONFIG_CPU_H8S)
	.rept	5
	.long	0
	.endr
	jmp	@SYMBOL_NAME(trace_break)
	.long	0
#endif

	jsr	@SYMBOL_NAME(interrupt_entry)	/* NMI */
	jmp	@SYMBOL_NAME(system_call)	/* TRAPA #0 (System call) */
	.long	0
	.long	0
	jmp	@SYMBOL_NAME(trace_break)	/* TRAPA #3 (breakpoint) */
	.rept	INTERRUPTS-12
	jsr	@SYMBOL_NAME(interrupt_entry)
	.endr
#endif
#if defined(CONFIG_RAMKERNEL)
	.globl SYMBOL_NAME(interrupt_redirect_table)
	.section .bss
SYMBOL_NAME_LABEL(interrupt_redirect_table)
	.space	4
#endif

	.section .text
	.align	2
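/*
 * Common interrupt entry.  Each redirect-table slot is four bytes,
 * so the return address pushed by the jsr identifies the vector:
 * (address - table base) / 4 - 1 is the IRQ number for do_IRQ().
 */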
SYMBOL_NAME_LABEL(interrupt_entry)
	SAVE_ALL
	mov.l	sp,er0
	add.l	#LVEC,er0
	btst	#4,r1l
	bne	1f
	/* user LVEC */
	mov.l	@SYMBOL_NAME(sw_usp),er0
	adds	#4,er0
1:
	mov.l	@er0,er0			/* LVEC address */
#if defined(CONFIG_ROMKERNEL)
	sub.l	#SYMBOL_NAME(interrupt_redirect_table),er0
#endif
#if defined(CONFIG_RAMKERNEL)
	mov.l	@SYMBOL_NAME(interrupt_redirect_table),er1
	sub.l	er1,er0
#endif
	SHLR2	er0
	dec.l	#1,er0
	mov.l	sp,er1
	subs	#4,er1				/* adjust ret_pc */
	jsr	@SYMBOL_NAME(do_IRQ)
	jmp	@SYMBOL_NAME(ret_from_interrupt)

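/*
 * System call entry (TRAPA #0).  The syscall number arrives in er0;
 * arguments 1-3 are reloaded from the saved user er1-er3 below, and
 * arguments 4-6 sit on the frame where SAVE_ALL pushed er4-er6.
 */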
SYMBOL_NAME_LABEL(system_call)
	subs	#4,sp				/* dummy LVEC */
	SAVE_ALL
	andc	#0x7f,ccr
	mov.l	er0,er4

	/* save top of frame */
	mov.l	sp,er0
	jsr	@SYMBOL_NAME(set_esp0)
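	/* kernel stacks are 8KB aligned with thread_info at the base,
	   so clearing the low 13 bits of sp yields the thread_info
	   pointer; check TIF_SYSCALL_TRACE there */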
	mov.l	sp,er2
	and.w	#0xe000,r2
	mov.b	@((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
	btst	#(TIF_SYSCALL_TRACE & 7),r2l
	beq	1f
	jsr	@SYMBOL_NAME(do_syscall_trace)
1:
	cmp.l	#NR_syscalls,er4
	bcc	badsys
	SHLL2	er4
	mov.l	#SYMBOL_NAME(sys_call_table),er0
	add.l	er4,er0
	mov.l	@er0,er4
	beq	SYMBOL_NAME(ret_from_exception):16
	mov.l	@(LER1:16,sp),er0
	mov.l	@(LER2:16,sp),er1
	mov.l	@(LER3:16,sp),er2
	jsr	@er4
	mov.l	er0,@(LER0:16,sp)		/* save the return value */
	mov.l	sp,er2
	and.w	#0xe000,r2
	mov.b	@((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
	btst	#(TIF_SYSCALL_TRACE & 7),r2l
	beq	2f
	jsr	@SYMBOL_NAME(do_syscall_trace)
2:
#if defined(CONFIG_SYSCALL_PRINT)
	jsr	@SYMBOL_NAME(syscall_print)
#endif
	orc	#0x80,ccr
	bra	resume_userspace

badsys:
	mov.l	#-ENOSYS,er0
	mov.l	er0,@(LER0:16,sp)
	bra	resume_userspace

#if !defined(CONFIG_PREEMPT)
#define resume_kernel restore_all
#endif

SYMBOL_NAME_LABEL(ret_from_exception)
#if defined(CONFIG_PREEMPT)
	orc	#0x80,ccr
#endif
SYMBOL_NAME_LABEL(ret_from_interrupt)
	mov.b	@(LCCR+1:16,sp),r0l
	btst	#4,r0l
	bne	resume_kernel:8			/* return from kernel */
resume_userspace:
	andc	#0x7f,ccr
	mov.l	sp,er4
	and.w	#0xe000,r4			/* er4 <- current thread info */
	mov.l	@(TI_FLAGS:16,er4),er1
	and.l	#_TIF_WORK_MASK,er1
	beq	restore_all:8
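	/* pending work: reschedule or deliver signals/notifications
	   before returning to user space */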
work_pending:
	btst	#TIF_NEED_RESCHED,r1l
	bne	work_resched:8
	/* work notifysig */
	mov.l	sp,er0
	subs	#4,er0				/* er0: pt_regs */
	jsr	@SYMBOL_NAME(do_notify_resume)
	bra	restore_all:8
work_resched:
	mov.l	sp,er0
	jsr	@SYMBOL_NAME(set_esp0)
	jsr	@SYMBOL_NAME(schedule)
	bra	resume_userspace:8
restore_all:
	RESTORE_ALL				/* Does RTE */

#if defined(CONFIG_PREEMPT)
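/*
 * Kernel preemption: reschedule only when preempt_count is zero,
 * TIF_NEED_RESCHED is set and interrupts were enabled in the
 * interrupted context.
 */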
resume_kernel:
	mov.l	@(TI_PRE_COUNT:16,er4),er0
	bne	restore_all:8
need_resched:
	mov.l	@(TI_FLAGS:16,er4),er0
	btst	#TIF_NEED_RESCHED,r0l
	beq	restore_all:8
	mov.b	@(LCCR+1:16,sp),r0l		/* Interrupt Enabled? */
	bmi	restore_all:8
	mov.l	#PREEMPT_ACTIVE,er0
	mov.l	er0,@(TI_PRE_COUNT:16,er4)
	andc	#0x7f,ccr
	mov.l	sp,er0
	jsr	@SYMBOL_NAME(set_esp0)
	jsr	@SYMBOL_NAME(schedule)
	orc	#0x80,ccr
	bra	need_resched:8
#endif

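/*
 * A newly forked child lands here after its first context switch;
 * the previous task, handed over in er2, becomes the argument to
 * schedule_tail().
 */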
SYMBOL_NAME_LABEL(ret_from_fork)
	mov.l	er2,er0
	jsr	@SYMBOL_NAME(schedule_tail)
	jmp	@SYMBOL_NAME(ret_from_exception)

SYMBOL_NAME_LABEL(resume)
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in er0 and next (the new task) is in er1, so don't change
	 * these registers until their contents are no longer needed.
	 */

	/* save ccr */
	sub.w	r3,r3
	stc	ccr,r3l
	mov.w	r3,@(THREAD_CCR+2:16,er0)

	/* disable interrupts */
	orc	#0x80,ccr
	mov.l	@SYMBOL_NAME(sw_usp),er3
	mov.l	er3,@(THREAD_USP:16,er0)
	mov.l	sp,@(THREAD_KSP:16,er0)

	/* Skip address space switching if they are the same. */
	/* FIXME: what did we hack out of here, this does nothing! */

	mov.l	@(THREAD_USP:16,er1),er0
	mov.l	er0,@SYMBOL_NAME(sw_usp)
	mov.l	@(THREAD_KSP:16,er1),sp

	/* restore ccr */
	mov.w	@(THREAD_CCR+2:16,er1),r3

	ldc	r3l,ccr
	rts

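/*
 * TRAPA #3 (breakpoint) handler.  If the word before the saved user
 * PC is not the TRAPA #3 opcode (0x5730), back the PC up two bytes
 * before reporting the trap to trace_trap().
 */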
SYMBOL_NAME_LABEL(trace_break)
	subs	#4,sp
	SAVE_ALL
	sub.l	er1,er1
	dec.l	#1,er1
	mov.l	er1,@(LORIG,sp)
	mov.l	sp,er0
	jsr	@SYMBOL_NAME(set_esp0)
	mov.l	@SYMBOL_NAME(sw_usp),er0
	mov.l	@er0,er1
	mov.w	@(-2:16,er1),r2
	cmp.w	#0x5730,r2
	beq	1f
	subs	#2,er1
	mov.l	er1,@er0
1:
	and.w	#0xff,e1
	mov.l	er1,er0
	jsr	@SYMBOL_NAME(trace_trap)
	jmp	@SYMBOL_NAME(ret_from_exception)

	.section .bss
SYMBOL_NAME_LABEL(sw_ksp)
	.space	4
SYMBOL_NAME_LABEL(sw_usp)
	.space	4

	.end