KVM: add support for change_pte mmu notifiers
[deliverable/linux.git] / arch / m68knommu / platform / 68328 / entry.S
1 /*
2 * linux/arch/m68knommu/platform/68328/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file README.legal in the main directory of this archive
8 * for more details.
9 *
10 * Linux/m68k support by Hamish Macdonald
11 */
12
13 #include <linux/sys.h>
14 #include <linux/linkage.h>
15 #include <asm/thread_info.h>
16 #include <asm/unistd.h>
17 #include <asm/errno.h>
18 #include <asm/setup.h>
19 #include <asm/segment.h>
20 #include <asm/traps.h>
21 #include <asm/asm-offsets.h>
22 #include <asm/entry.h>
23
24 .text
25
/* Entry points referenced from vector tables and the scheduler elsewhere
 * in the kernel; inthandler1..7 are the fixed-priority 68328 IRQ entries. */
26 .globl system_call
27 .globl resume
28 .globl ret_from_exception
29 .globl ret_from_signal
30 .globl sys_call_table
31 .globl ret_from_interrupt
32 .globl bad_interrupt
33 .globl inthandler1
34 .globl inthandler2
35 .globl inthandler3
36 .globl inthandler4
37 .globl inthandler5
38 .globl inthandler6
39 .globl inthandler7
40
/* Syscall number was out of range: fail the call with -ENOSYS.
 * The value is written into the saved frame so RESTORE_ALL hands it
 * back to userspace in %d0. */
41 badsys:
42 movel #-ENOSYS,%sp@(PT_D0) /* return -ENOSYS to the caller */
43 jra ret_from_exception
44
/* Traced-syscall path: notify the tracer (ptrace/strace) on entry,
 * perform the syscall, then notify again on exit.  Falls through into
 * ret_from_signal to unwind the switch-stack frame. */
45 do_trace:
46 movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/
47 subql #4,%sp /* dummy return address for syscall_trace */
48 SAVE_SWITCH_STACK
49 jbsr syscall_trace /* syscall-entry tracing hook */
50 RESTORE_SWITCH_STACK
51 addql #4,%sp
52 movel %sp@(PT_ORIG_D0),%d1 /* reload syscall number (tracer may have changed it) */
53 movel #-ENOSYS,%d0 /* default result if number is out of range */
54 cmpl #NR_syscalls,%d1
55 jcc 1f /* unsigned >= NR_syscalls: skip the call */
56 lsl #2,%d1 /* scale to a 4-byte table offset */
57 lea sys_call_table, %a0
/* NOTE(review): this jumps to the address of the table slot itself
 * (%a0 + %d1) rather than loading the function pointer first, unlike
 * the untraced path in system_call (movel %a0@(%d0),%a0; jbsr %a0@).
 * Verify against a working tree before relying on this path. */
58 jbsr %a0@(%d1)
59
60 1: movel %d0,%sp@(PT_D0) /* save the return value */
61 subql #4,%sp /* dummy return address */
62 SAVE_SWITCH_STACK
63 jbsr syscall_trace /* syscall-exit tracing hook; falls into ret_from_signal */
64
/* Common unwind after signal delivery / traced syscall exit. */
65 ret_from_signal:
66 RESTORE_SWITCH_STACK
67 addql #4,%sp /* drop the dummy return address */
68 jra ret_from_exception
69
/* System call entry (trap).  Saves the full register frame, records the
 * frame top for signal handling, dispatches through sys_call_table, and
 * falls through into ret_from_exception. */
70 ENTRY(system_call)
71 SAVE_ALL
72
73 /* save top of frame*/
74 pea %sp@ /* arg: pointer to the saved pt_regs */
75 jbsr set_esp0
76 addql #4,%sp
77
78 movel %sp@(PT_ORIG_D0),%d0 /* %d0 = syscall number */
79
80 movel %sp,%d1 /* get thread_info pointer */
81 andl #-THREAD_SIZE,%d1 /* thread_info sits at the base of the kernel stack */
82 movel %d1,%a2
83 btst #TIF_SYSCALL_TRACE,%a2@(TI_FLAGS)
84 jne do_trace /* being traced: take the slow path */
85 cmpl #NR_syscalls,%d0
86 jcc badsys /* unsigned >= NR_syscalls: invalid number */
87 lsl #2,%d0 /* scale to a 4-byte table offset */
88 lea sys_call_table,%a0
89 movel %a0@(%d0), %a0 /* load the handler pointer from the table */
90 jbsr %a0@ /* call it; result comes back in %d0 */
91 movel %d0,%sp@(PT_D0) /* save the return value*/
92
/* Common exit path for syscalls, exceptions and interrupts.
 * If we are returning to kernel context, just restore; if returning to
 * user mode, handle reschedule and pending signals first. */
93 ret_from_exception:
94 btst #5,%sp@(PT_SR) /* check if returning to kernel*/
95 jeq Luser_return /* if so, skip resched, signals*/
96
97 Lkernel_return:
98 RESTORE_ALL
99
100 Luser_return:
101 /* only allow interrupts when we are really the last one on the*/
102 /* kernel stack, otherwise stack overflow can occur during*/
103 /* heavy interrupt load*/
104 andw #ALLOWINT,%sr /* re-enable interrupts (clear IPL mask bits) */
105
106 movel %sp,%d1 /* get thread_info pointer */
107 andl #-THREAD_SIZE,%d1 /* thread_info sits at the base of the kernel stack */
108 movel %d1,%a2
109 move %a2@(TI_FLAGS),%d1 /* thread_info->flags */
110 andl #_TIF_WORK_MASK,%d1
111 jne Lwork_to_do /* pending work (resched/signal)? */
112 RESTORE_ALL
113
114 Lwork_to_do:
115 movel %a2@(TI_FLAGS),%d1 /* thread_info->flags */
116 btst #TIF_NEED_RESCHED,%d1
117 jne reschedule /* resched first; it re-enters this path */
118
/* Otherwise the pending work is a signal: deliver it. */
119 Lsignal_return:
120 subql #4,%sp /* dummy return address*/
121 SAVE_SWITCH_STACK
122 pea %sp@(SWITCH_STACK_SIZE) /* arg 2: pt_regs pointer */
123 clrl %sp@- /* arg 1: oldset = NULL */
124 bsrw do_signal
125 addql #8,%sp /* pop the two arguments */
126 RESTORE_SWITCH_STACK
127 addql #4,%sp /* drop the dummy return address */
128 Lreturn:
129 RESTORE_ALL
130
131 /*
132 * This is the main interrupt handler, responsible for calling process_int()
133 */
/*
 * inthandler1 -- entry for fixed interrupt level 1 (vector 65).
 * Saves the full register frame, calls the C dispatcher
 * process_int(vec, regs), then joins the common interrupt return path.
 * The old PT_VECTOR read/mask into %d0 was dead code (the vector number
 * is pushed as an immediate and %d0 is restored by RESTORE_ALL), so it
 * has been dropped.
 */
inthandler1:
	SAVE_ALL
	movel	%sp,%sp@-		/* arg 2: pointer to saved pt_regs */
	movel	#65,%sp@-		/* arg 1: vector number (fixed) */
	jbsr	process_int		/* dispatch the IRQ to C code */
	addql	#8,%sp			/* pop the two arguments */
	bra	ret_from_interrupt
144
/*
 * inthandler2 -- entry for fixed interrupt level 2 (vector 66).
 * Saves the full register frame, calls the C dispatcher
 * process_int(vec, regs), then joins the common interrupt return path.
 * The old PT_VECTOR read/mask into %d0 was dead code (the vector number
 * is pushed as an immediate and %d0 is restored by RESTORE_ALL), so it
 * has been dropped.
 */
inthandler2:
	SAVE_ALL
	movel	%sp,%sp@-		/* arg 2: pointer to saved pt_regs */
	movel	#66,%sp@-		/* arg 1: vector number (fixed) */
	jbsr	process_int		/* dispatch the IRQ to C code */
	addql	#8,%sp			/* pop the two arguments */
	bra	ret_from_interrupt
155
/*
 * inthandler3 -- entry for fixed interrupt level 3 (vector 67).
 * Saves the full register frame, calls the C dispatcher
 * process_int(vec, regs), then joins the common interrupt return path.
 * The old PT_VECTOR read/mask into %d0 was dead code (the vector number
 * is pushed as an immediate and %d0 is restored by RESTORE_ALL), so it
 * has been dropped.
 */
inthandler3:
	SAVE_ALL
	movel	%sp,%sp@-		/* arg 2: pointer to saved pt_regs */
	movel	#67,%sp@-		/* arg 1: vector number (fixed) */
	jbsr	process_int		/* dispatch the IRQ to C code */
	addql	#8,%sp			/* pop the two arguments */
	bra	ret_from_interrupt
166
/*
 * inthandler4 -- entry for fixed interrupt level 4 (vector 68).
 * Saves the full register frame, calls the C dispatcher
 * process_int(vec, regs), then joins the common interrupt return path.
 * The old PT_VECTOR read/mask into %d0 was dead code (the vector number
 * is pushed as an immediate and %d0 is restored by RESTORE_ALL), so it
 * has been dropped.
 */
inthandler4:
	SAVE_ALL
	movel	%sp,%sp@-		/* arg 2: pointer to saved pt_regs */
	movel	#68,%sp@-		/* arg 1: vector number (fixed) */
	jbsr	process_int		/* dispatch the IRQ to C code */
	addql	#8,%sp			/* pop the two arguments */
	bra	ret_from_interrupt
177
/*
 * inthandler5 -- entry for fixed interrupt level 5 (vector 69).
 * Saves the full register frame, calls the C dispatcher
 * process_int(vec, regs), then joins the common interrupt return path.
 * The old PT_VECTOR read/mask into %d0 was dead code (the vector number
 * is pushed as an immediate and %d0 is restored by RESTORE_ALL), so it
 * has been dropped.
 */
inthandler5:
	SAVE_ALL
	movel	%sp,%sp@-		/* arg 2: pointer to saved pt_regs */
	movel	#69,%sp@-		/* arg 1: vector number (fixed) */
	jbsr	process_int		/* dispatch the IRQ to C code */
	addql	#8,%sp			/* pop the two arguments */
	bra	ret_from_interrupt
188
/*
 * inthandler6 -- entry for fixed interrupt level 6 (vector 70).
 * Saves the full register frame, calls the C dispatcher
 * process_int(vec, regs), then joins the common interrupt return path.
 * The old PT_VECTOR read/mask into %d0 was dead code (the vector number
 * is pushed as an immediate and %d0 is restored by RESTORE_ALL), so it
 * has been dropped.
 */
inthandler6:
	SAVE_ALL
	movel	%sp,%sp@-		/* arg 2: pointer to saved pt_regs */
	movel	#70,%sp@-		/* arg 1: vector number (fixed) */
	jbsr	process_int		/* dispatch the IRQ to C code */
	addql	#8,%sp			/* pop the two arguments */
	bra	ret_from_interrupt
199
/*
 * inthandler7 -- entry for fixed interrupt level 7 (vector 71).
 * Saves the full register frame, calls the C dispatcher
 * process_int(vec, regs), then joins the common interrupt return path.
 * The old PT_VECTOR read/mask into %d0 was dead code (the vector number
 * is pushed as an immediate and %d0 is restored by RESTORE_ALL), so it
 * has been dropped.
 */
inthandler7:
	SAVE_ALL
	movel	%sp,%sp@-		/* arg 2: pointer to saved pt_regs */
	movel	#71,%sp@-		/* arg 1: vector number (fixed) */
	jbsr	process_int		/* dispatch the IRQ to C code */
	addql	#8,%sp			/* pop the two arguments */
	bra	ret_from_interrupt
210
/* Generic interrupt entry: reads the vector number out of the saved
 * exception frame (unlike inthandler1..7, which have it hard-coded)
 * and passes it to process_int(vec, regs). */
211 inthandler:
212 SAVE_ALL
213 movew %sp@(PT_VECTOR), %d0 /* fetch the format/vector word */
214 and #0x3ff, %d0 /* mask off the frame-format bits, keep the vector offset */
215
216 movel %sp,%sp@- /* arg 2: pointer to saved pt_regs */
217 movel %d0,%sp@- /* put vector # on stack*/
218 jbsr process_int /* process the IRQ*/
219 3: addql #8,%sp /* pop parameters off stack*/
220 bra ret_from_interrupt
221
/* Common interrupt return.  Decides whether we interrupted another
 * interrupt (just restore) or base-level code (run the full exception
 * return path, which handles resched/signals).
 * NOTE(review): the initial jeq tests condition codes left over from
 * the caller's "addql #8,%sp" (result = new %sp, effectively never
 * zero), so the branch to 1f appears never taken on entry; and since
 * "and #7" always clears C, the jhi/jeq pair below covers both the
 * zero and non-zero cases, leaving the pea/do_softirq tail looking
 * unreachable.  Verify against a known-good tree before changing. */
222 ret_from_interrupt:
223 jeq 1f
224 2:
225 RESTORE_ALL
226 1:
227 moveb %sp@(PT_SR), %d0 /* high byte of saved SR (contains IPL) */
228 and #7, %d0 /* isolate the interrupt priority level */
229 jhi 2b /* nested interrupt: plain restore */
230
231 /* check if we need to do software interrupts */
232 jeq ret_from_exception /* IPL 0: take the full return path */
233
234 pea ret_from_exception /* return into ret_from_exception... */
235 jra do_softirq /* ...after running pending softirqs */
236
237
238 /*
239 * Handler for uninitialized and spurious interrupts.
240 */
241 ENTRY(bad_interrupt)
242 addql #1,num_spurious /* count it for /proc diagnostics, then ignore */
243 rte /* return straight to the interrupted context */
244
245 /*
246 * Beware - when entering resume, prev (the current task) is
247 * in a0, next (the new task) is in a1,so don't change these
248 * registers until their contents are no longer needed.
249 */
/* Context switch: save prev's SR, USP and kernel SP into its
 * thread_struct, then load the same state for next.  Returns (rts) on
 * next's kernel stack, i.e. into whatever next last saved as its
 * return address.  No FPU/MMU state on this platform. */
250 ENTRY(resume)
251 movel %a0,%d1 /* save prev thread in d1 */
252 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
253 movel %usp,%a2 /* save usp */
254 movel %a2,%a0@(TASK_THREAD+THREAD_USP)
255
256 SAVE_SWITCH_STACK
257 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
258 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
259 RESTORE_SWITCH_STACK
260
261 movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
262 movel %a0,%usp
263 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */
264 rts /* continue where next left off */
265
This page took 0.053262 seconds and 5 git commands to generate.