/*
 *  linux/arch/m68knommu/platform/68328/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl ret_from_interrupt
.globl bad_interrupt
.globl inthandler1
.globl inthandler2
.globl inthandler3
.globl inthandler4
.globl inthandler5
.globl inthandler6
.globl inthandler7

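/*
 * badsys: the system call number was out of range; return -ENOSYS in
 * the saved d0 slot and take the normal exception return path.
 */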
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_exception

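/*
 * do_trace: system call entry with TIF_SYSCALL_TRACE set.  The actual
 * call is bracketed by syscall_trace_enter/syscall_trace_leave so that
 * a tracer such as strace can observe entry and exit.
 */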
do_trace:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)	/* needed for strace*/
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d1
	movel	#-ENOSYS,%d0
	cmpl	#NR_syscalls,%d1
	jcc	1f
	lsl	#2,%d1
	lea	sys_call_table, %a0
	jbsr	%a0@(%d1)

1:	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave

ret_from_signal:
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	ret_from_exception

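/*
 * system_call: trap entry for system calls.  Save the register frame,
 * record the frame pointer via set_esp0(), divert to do_trace when
 * syscall tracing is enabled, range-check the call number against
 * NR_syscalls and dispatch through sys_call_table.
 */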
ENTRY(system_call)
	SAVE_ALL

	/* save top of frame*/
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp

	movel	%sp@(PT_OFF_ORIG_D0),%d0

	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1
	movel	%d1,%a2
	btst	#(TIF_SYSCALL_TRACE%8),%a2@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
	jne	do_trace
	cmpl	#NR_syscalls,%d0
	jcc	badsys
	lsl	#2,%d0
	lea	sys_call_table,%a0
	movel	%a0@(%d0), %a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value*/

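/*
 * Common exception return.  When returning to kernel mode (supervisor
 * bit set in the saved SR) just restore the registers; when returning
 * to user space, check for pending work (reschedule, signal delivery)
 * first.
 */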
ret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	/* check if returning to kernel*/
	jeq	Luser_return		/* if so, skip resched, signals*/

Lkernel_return:
	RESTORE_ALL

Luser_return:
	/* only allow interrupts when we are really the last one on the*/
	/* kernel stack, otherwise stack overflow can occur during*/
	/* heavy interrupt load*/
	andw	#ALLOWINT,%sr

	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1
	movel	%d1,%a2
	move	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
	andl	#_TIF_WORK_MASK,%d1
	jne	Lwork_to_do
	RESTORE_ALL

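/* work is pending: reschedule if needed, otherwise deliver signals */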
Lwork_to_do:
	movel	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule

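/* deliver pending signals: pass the saved register frame to do_signal() */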
Lsignal_return:
	subql	#4,%sp			/* dummy return address*/
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrw	do_signal
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
Lreturn:
	RESTORE_ALL

/*
 * These are the main interrupt handlers, responsible for calling
 * process_int(); inthandler1 through inthandler7 service interrupt
 * levels 1-7 (vectors 65-71).
 */
inthandler1:
	SAVE_ALL
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#65,%sp@-		/* put vector # on stack*/
	jbsr	process_int		/* process the IRQ*/
3:	addql	#8,%sp			/* pop parameters off stack*/
	bra	ret_from_interrupt

inthandler2:
	SAVE_ALL
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#66,%sp@-		/* put vector # on stack*/
	jbsr	process_int		/* process the IRQ*/
3:	addql	#8,%sp			/* pop parameters off stack*/
	bra	ret_from_interrupt

inthandler3:
	SAVE_ALL
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#67,%sp@-		/* put vector # on stack*/
	jbsr	process_int		/* process the IRQ*/
3:	addql	#8,%sp			/* pop parameters off stack*/
	bra	ret_from_interrupt

inthandler4:
	SAVE_ALL
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#68,%sp@-		/* put vector # on stack*/
	jbsr	process_int		/* process the IRQ*/
3:	addql	#8,%sp			/* pop parameters off stack*/
	bra	ret_from_interrupt

inthandler5:
	SAVE_ALL
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#69,%sp@-		/* put vector # on stack*/
	jbsr	process_int		/* process the IRQ*/
3:	addql	#8,%sp			/* pop parameters off stack*/
	bra	ret_from_interrupt

inthandler6:
	SAVE_ALL
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#70,%sp@-		/* put vector # on stack*/
	jbsr	process_int		/* process the IRQ*/
3:	addql	#8,%sp			/* pop parameters off stack*/
	bra	ret_from_interrupt

inthandler7:
	SAVE_ALL
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	#71,%sp@-		/* put vector # on stack*/
	jbsr	process_int		/* process the IRQ*/
3:	addql	#8,%sp			/* pop parameters off stack*/
	bra	ret_from_interrupt

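/*
 * Generic handler: extract the vector information from the saved
 * format/vector word and hand it to process_int().
 */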
inthandler:
	SAVE_ALL
	movew	%sp@(PT_OFF_FORMATVEC), %d0
	and	#0x3ff, %d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		/* put vector # on stack*/
	jbsr	process_int		/* process the IRQ*/
3:	addql	#8,%sp			/* pop parameters off stack*/
	bra	ret_from_interrupt

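/*
 * Common return path for the interrupt handlers above; the interrupt
 * priority level saved in the SR decides between a plain register
 * restore and the full exception return path.
 */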
ret_from_interrupt:
	jeq	1f
2:
	RESTORE_ALL
1:
	moveb	%sp@(PT_OFF_SR), %d0
	and	#7, %d0
	jhi	2b

	/* check if we need to do software interrupts */
	jeq	ret_from_exception

	pea	ret_from_exception
	jra	do_softirq


/*
 * Handler for uninitialized and spurious interrupts.
 */
ENTRY(bad_interrupt)
	addql	#1,num_spurious
	rte

/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
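/*
 * resume() performs the task switch: it saves the status register,
 * user stack pointer and kernel stack pointer of prev (a0) into its
 * thread_struct, then loads the same state for next (a1) and returns
 * on the new kernel stack.
 */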
ENTRY(resume)
	movel	%a0,%d1				/* save prev thread in d1 */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)	/* save sr */
	movel	%usp,%a2			/* save usp */
	movel	%a2,%a0@(TASK_THREAD+THREAD_USP)

	SAVE_SWITCH_STACK
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	RESTORE_SWITCH_STACK

	movel	%a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
	movel	%a0,%usp
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr	/* restore thread status reg */
	rts