/*
 * linux/arch/m68knommu/platform/68360/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 SED Systems, a Division of Calian Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 * M68360 Port by SED Systems, and Lineo.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl ret_from_interrupt
.globl bad_interrupt
.globl inthandler

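/*
 * Out-of-range system call numbers end up here: fail with -ENOSYS.
 */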
badsys:
        movel   #-ENOSYS,%sp@(PT_OFF_D0)
        jra     ret_from_exception

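/*
 * System call entry for traced tasks: let syscall_trace() report to the
 * tracer both before and after the handler runs, then fall through into
 * ret_from_signal to rejoin the normal return path.
 */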
do_trace:
        movel   #-ENOSYS,%sp@(PT_OFF_D0)        /* needed for strace */
        subql   #4,%sp
        SAVE_SWITCH_STACK
        jbsr    syscall_trace
        RESTORE_SWITCH_STACK
        addql   #4,%sp
        movel   %sp@(PT_OFF_ORIG_D0),%d1
        movel   #-ENOSYS,%d0
        cmpl    #NR_syscalls,%d1
        jcc     1f
        lsl     #2,%d1
        lea     sys_call_table, %a0
        jbsr    %a0@(%d1)

1:      movel   %d0,%sp@(PT_OFF_D0)             /* save the return value */
        subql   #4,%sp                          /* dummy return address */
        SAVE_SWITCH_STACK
        jbsr    syscall_trace

ret_from_signal:
        RESTORE_SWITCH_STACK
        addql   #4,%sp
        jra     ret_from_exception

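/*
 * System call trap entry: the syscall number arrives in %d0.  Numbers
 * outside the table are rejected via badsys; valid ones are dispatched
 * through sys_call_table and the result is stored back in the saved %d0.
 */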
ENTRY(system_call)
        SAVE_ALL

        /* save top of frame */
        pea     %sp@
        jbsr    set_esp0
        addql   #4,%sp

        btst    #PF_TRACESYS_BIT,%a2@(TASK_FLAGS+PF_TRACESYS_OFF)
        jne     do_trace
        cmpl    #NR_syscalls,%d0
        jcc     badsys
        lsl     #2,%d0
        lea     sys_call_table,%a0
        movel   %a0@(%d0), %a0
        jbsr    %a0@
        movel   %d0,%sp@(PT_OFF_D0)             /* save the return value */

ret_from_exception:
        btst    #5,%sp@(PT_OFF_SR)              /* check if returning to kernel */
        jeq     Luser_return                    /* if so, skip resched, signals */

Lkernel_return:
        RESTORE_ALL

Luser_return:
        /* only allow interrupts when we are really the last one on the */
        /* kernel stack, otherwise stack overflow can occur during */
        /* heavy interrupt load */
        andw    #ALLOWINT,%sr

        movel   %sp,%d1                         /* get thread_info pointer */
        andl    #-THREAD_SIZE,%d1
        movel   %d1,%a2
        move    %a2@(TI_FLAGS),%d1              /* thread_info->flags */
        andl    #_TIF_WORK_MASK,%d1
        jne     Lwork_to_do
        RESTORE_ALL

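/* Pending work: reschedule first if needed, otherwise deliver signals. */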
Lwork_to_do:
        movel   %a2@(TI_FLAGS),%d1              /* thread_info->flags */
        btst    #TIF_NEED_RESCHED,%d1
        jne     reschedule

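/*
 * Deliver pending signals: call do_signal() with a NULL oldset and a
 * pointer to the saved register frame, then restore and return to user.
 */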
Lsignal_return:
        subql   #4,%sp                          /* dummy return address */
        SAVE_SWITCH_STACK
        pea     %sp@(SWITCH_STACK_SIZE)
        clrl    %sp@-
        bsrw    do_signal
        addql   #8,%sp
        RESTORE_SWITCH_STACK
        addql   #4,%sp
Lreturn:
        RESTORE_ALL

/*
 * This is the main interrupt handler, responsible for calling do_IRQ()
 */
inthandler:
        SAVE_ALL
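        /* Extract the vector number: the format/vector word saved in the
         * exception frame holds the vector offset (vector number * 4) in
         * its low bits, so mask it and shift right by 2 for do_IRQ(). */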
        movew   %sp@(PT_OFF_VECTOR), %d0
        and.l   #0x3ff, %d0
        lsr.l   #0x02, %d0

        movel   %sp,%sp@-
        movel   %d0,%sp@-                       /* put vector # on stack */
        jbsr    do_IRQ                          /* process the IRQ */
3:      addql   #8,%sp                          /* pop parameters off stack */
        bra     ret_from_interrupt

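/*
 * Common interrupt return: pending softirqs are only run when the
 * interrupted context was at interrupt priority 0 (i.e. this was not a
 * nested interrupt); otherwise registers are restored straight away.
 */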
ret_from_interrupt:
        jeq     1f
2:
        RESTORE_ALL
1:
        moveb   %sp@(PT_OFF_SR), %d0
        and     #7, %d0
        jhi     2b
        /* check if we need to do software interrupts */

        movel   irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0
        jeq     ret_from_exception

        pea     ret_from_exception
        jra     do_softirq


/*
 * Handler for uninitialized and spurious interrupts.
 */
bad_interrupt:
        addql   #1,num_spurious
        rte

/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
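/*
 * resume() saves the outgoing task's status register, user stack pointer
 * and kernel stack pointer in its thread_struct, then loads the same
 * state for the incoming task and returns on the new kernel stack.
 */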
ENTRY(resume)
        movel   %a0,%d1                          /* save prev thread in d1 */
        movew   %sr,%a0@(TASK_THREAD+THREAD_SR)  /* save sr */
        movel   %usp,%a2                         /* save usp */
        movel   %a2,%a0@(TASK_THREAD+THREAD_USP)

        SAVE_SWITCH_STACK
        movel   %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
        movel   %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
        RESTORE_SWITCH_STACK

        movel   %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
        movel   %a0,%usp
        movew   %a1@(TASK_THREAD+THREAD_SR),%sr  /* restore thread status reg */
        rts
