/*
 * rtrap.S: Return from Sparc trap low-level code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/contregs.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>

#define t_psr     l0
#define t_pc      l1
#define t_npc     l2
#define t_wim     l3
#define twin_tmp1 l4
#define glob_tmp  g4
#define curptr    g6

/* 7 WINDOW SPARC PATCH INSTRUCTIONS */
	.globl	rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
	.globl	rtrap_7win_patch4, rtrap_7win_patch5
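/* Alternate shift/mask constants for CPUs with only 7 register
 * windows.  They are patched over the corresponding rtrap_patch1-5
 * instructions below, which assume the usual 8 windows, when a
 * 7-window chip is detected at boot.
 */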
rtrap_7win_patch1:	srl	%t_wim, 0x6, %glob_tmp
rtrap_7win_patch2:	and	%glob_tmp, 0x7f, %glob_tmp
rtrap_7win_patch3:	srl	%g1, 7, %g2
rtrap_7win_patch4:	srl	%g2, 6, %g2
rtrap_7win_patch5:	and	%g1, 0x7f, %g1
/* END OF PATCH INSTRUCTIONS */

/* We need to check for a few things which are:
 * 1) The need to call schedule() because this
 *    process's quantum is up.
 * 2) Pending work for this process; if any signal or other
 *    notify-resume work is pending we need to call
 *    do_notify_resume() to handle it.
 *
 * Otherwise we just check whether the rett would land us
 * in an invalid window; if so we need to pull that window
 * off the user/kernel stack first.
 */

	.globl	ret_trap_entry, rtrap_patch1, rtrap_patch2
	.globl	rtrap_patch3, rtrap_patch4, rtrap_patch5
	.globl	ret_trap_lockless_ipi
ret_trap_entry:
ret_trap_lockless_ipi:
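	/* PSR_PS in the saved %psr tells us whether the trap came
	 * from kernel mode; if so, take the simpler kernel return
	 * path.  Either way, clear the software PSR_SYSCALL bit.
	 */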
	andcc	%t_psr, PSR_PS, %g0
	sethi	%hi(PSR_SYSCALL), %g1
	be	1f
	 andn	%t_psr, %g1, %t_psr

	wr	%t_psr, 0x0, %psr
	b	ret_trap_kernel
	 nop

1:
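	/* Returning to user space: keep re-checking the thread flags
	 * until neither a reschedule nor notify-resume work is
	 * pending, since schedule()/do_notify_resume() may set new
	 * flags.
	 */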
	ld	[%curptr + TI_FLAGS], %g2
	andcc	%g2, (_TIF_NEED_RESCHED), %g0
	be	signal_p
	 nop

	call	schedule
	 nop

	ld	[%curptr + TI_FLAGS], %g2
signal_p:
	andcc	%g2, _TIF_DO_NOTIFY_RESUME_MASK, %g0
	bz,a	ret_trap_continue
	 ld	[%sp + STACKFRAME_SZ + PT_PSR], %t_psr

	mov	%g2, %o2
	mov	%l5, %o1
	call	do_notify_resume
	 add	%sp, STACKFRAME_SZ, %o0	! pt_regs ptr

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

ret_trap_continue:
	sethi	%hi(PSR_SYSCALL), %g1
	andn	%t_psr, %g1, %t_psr
	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

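	/* If user register windows were stashed in the thread's
	 * window buffer (the user stack was not safe to touch at
	 * trap time), re-enable traps and let
	 * try_to_clear_window_buffer() push them out now.
	 */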
	ld	[%curptr + TI_W_SAVED], %twin_tmp1
	orcc	%g0, %twin_tmp1, %g0
	be	ret_trap_nobufwins
	 nop

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	mov	1, %o1
	call	try_to_clear_window_buffer
	 add	%sp, STACKFRAME_SZ, %o0

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

ret_trap_nobufwins:
	/* Load up the user's out registers so we can pull
	 * a window from the stack, if necessary.
	 */
	LOAD_PT_INS(sp)

	/* If there are already live user windows in the
	 * set we can return from trap safely.
	 */
	ld	[%curptr + TI_UWINMASK], %twin_tmp1
	orcc	%g0, %twin_tmp1, %g0
	bne	ret_trap_userwins_ok
	 nop

	/* Calculate the new %wim; we have to pull a register
	 * window from the user's stack.
	 */
ret_trap_pull_one_window:
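	/* Rotate the invalid-window mask left by one bit position,
	 * modulo NWINDOWS: new_wim = (wim << 1) | (wim >> (NWINDOWS - 1)).
	 * The shift/mask constants below assume 8 windows and are
	 * patched on 7-window CPUs (rtrap_7win_patch1/2 above).
	 */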
	rd	%wim, %t_wim
	sll	%t_wim, 0x1, %twin_tmp1
rtrap_patch1:	srl	%t_wim, 0x7, %glob_tmp
	or	%glob_tmp, %twin_tmp1, %glob_tmp
rtrap_patch2:	and	%glob_tmp, 0xff, %glob_tmp

	wr	%glob_tmp, 0x0, %wim

	/* Here comes the architecture specific
	 * branch to the user stack checking routine
	 * for return from traps.
	 */
	b	srmmu_rett_stackchk
	 andcc	%fp, 0x7, %g0

ret_trap_userwins_ok:
	LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
	or	%t_pc, %t_npc, %g2
	andcc	%g2, 0x3, %g0
	sethi	%hi(PSR_SYSCALL), %g2
	be	1f
	 andn	%t_psr, %g2, %t_psr

	b	ret_trap_unaligned_pc
	 add	%sp, STACKFRAME_SZ, %o0

1:
	LOAD_PT_YREG(sp, g1)
	LOAD_PT_GLOBALS(sp)

	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc

ret_trap_unaligned_pc:
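	/* %pc or %npc in the saved frame is not word aligned.
	 * Restore a sane %wim, re-enable traps and hand the saved
	 * pc/npc/psr to do_memaccess_unaligned().
	 */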
	ld	[%sp + STACKFRAME_SZ + PT_PC], %o1
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %o2
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %o3

	wr	%t_wim, 0x0, %wim		! or else...

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	call	do_memaccess_unaligned
	 nop

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

ret_trap_kernel:
	/* Will the rett land us in the invalid window? */
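	/* CWP lives in the low bits of %psr and sll only uses the
	 * low 5 bits of its shift count, so (2 << %t_psr) is the
	 * mask bit for window CWP + 1, i.e. the window rett will
	 * restore into; the patched srl folds in the wrap back to
	 * window 0.
	 */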
	mov	2, %g1
	sll	%g1, %t_psr, %g1
rtrap_patch3:	srl	%g1, 8, %g2
	or	%g1, %g2, %g1
	rd	%wim, %g2
	andcc	%g2, %g1, %g0
	be	1f		! Nope, just return from the trap
	 sll	%g2, 0x1, %g1

	/* We have to grab a window before returning. */
rtrap_patch4:	srl	%g2, 7, %g2
	or	%g1, %g2, %g1
rtrap_patch5:	and	%g1, 0xff, %g1

	wr	%g1, 0x0, %wim

	/* Grrr, make sure we load from the right %sp... */
	LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)

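	/* Move into the window rett will restore into (restore),
	 * refill it from the stack frame it points at (LOAD_WINDOW),
	 * then step back to the trap window (save) before returning.
	 */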
	restore	%g0, %g0, %g0
	LOAD_WINDOW(sp)
	b	2f
	 save	%g0, %g0, %g0

	/* Reload the entire frame in case this is from a
	 * kernel system call or whatever...
	 */
1:
	LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
2:
	sethi	%hi(PSR_SYSCALL), %twin_tmp1
	andn	%t_psr, %twin_tmp1, %t_psr
	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc

ret_trap_user_stack_is_bolixed:
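	/* The user stack could not be used to refill a window.
	 * Put %wim back, re-enable traps and let window_ret_fault()
	 * deal with the offending process.
	 */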
	wr	%t_wim, 0x0, %wim

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	call	window_ret_fault
	 add	%sp, STACKFRAME_SZ, %o0

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

	.globl	srmmu_rett_stackchk
srmmu_rett_stackchk:
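	/* The bne below consumes the condition codes set by the
	 * "andcc %fp, 0x7" in our caller's delay slot: the user's
	 * %fp must be 8-byte aligned and must point below
	 * PAGE_OFFSET, i.e. into user space.
	 */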
	bne	ret_trap_user_stack_is_bolixed
	 sethi	%hi(PAGE_OFFSET), %g1
	cmp	%g1, %fp
	bleu	ret_trap_user_stack_is_bolixed
	 mov	AC_M_SFSR, %g1
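	/* Read the SFSR once to clear any stale fault status, then
	 * set the no-fault bit (0x2) in the SRMMU control register
	 * so a fault while loading the user's window is recorded
	 * instead of trapping.
	 */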
	LEON_PI(lda	[%g1] ASI_LEON_MMUREGS, %g0)
	SUN_PI_(lda	[%g1] ASI_M_MMUREGS, %g0)

	LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %g1)
	SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %g1)
	or	%g1, 0x2, %g1
	LEON_PI(sta	%g1, [%g0] ASI_LEON_MMUREGS)
	SUN_PI_(sta	%g1, [%g0] ASI_M_MMUREGS)

	restore	%g0, %g0, %g0

	LOAD_WINDOW(sp)

	save	%g0, %g0, %g0

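	/* Window loaded; turn the no-fault bit back off.
	 * (%g1 still holds the control register value from above.)
	 */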
	andn	%g1, 0x2, %g1
	LEON_PI(sta	%g1, [%g0] ASI_LEON_MMUREGS)
	SUN_PI_(sta	%g1, [%g0] ASI_M_MMUREGS)

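	/* Now read the fault address and fault status registers to
	 * see whether the window load faulted; the 0x2 (fault
	 * address valid) bit set in the SFSR means the user stack
	 * was not readable.
	 */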
	mov	AC_M_SFAR, %g2
	LEON_PI(lda	[%g2] ASI_LEON_MMUREGS, %g2)
	SUN_PI_(lda	[%g2] ASI_M_MMUREGS, %g2)

	mov	AC_M_SFSR, %g1
	LEON_PI(lda	[%g1] ASI_LEON_MMUREGS, %g1)
	SUN_PI_(lda	[%g1] ASI_M_MMUREGS, %g1)
	andcc	%g1, 0x2, %g0
	be	ret_trap_userwins_ok
	 nop

	b,a	ret_trap_user_stack_is_bolixed