Commit | Line | Data |
---|---|---|
ca54502b MS |
1 | /* |
2 | * Low-level system-call handling, trap handlers and context-switching | |
3 | * | |
4 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | |
5 | * Copyright (C) 2008-2009 PetaLogix | |
6 | * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au> | |
7 | * Copyright (C) 2001,2002 NEC Corporation | |
8 | * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org> | |
9 | * | |
10 | * This file is subject to the terms and conditions of the GNU General | |
11 | * Public License. See the file COPYING in the main directory of this | |
12 | * archive for more details. | |
13 | * | |
14 | * Written by Miles Bader <miles@gnu.org> | |
15 | * Heavily modified by John Williams for Microblaze | |
16 | */ | |
17 | ||
18 | #include <linux/sys.h> | |
19 | #include <linux/linkage.h> | |
20 | ||
21 | #include <asm/entry.h> | |
22 | #include <asm/current.h> | |
23 | #include <asm/processor.h> | |
24 | #include <asm/exceptions.h> | |
25 | #include <asm/asm-offsets.h> | |
26 | #include <asm/thread_info.h> | |
27 | ||
28 | #include <asm/page.h> | |
29 | #include <asm/unistd.h> | |
30 | ||
31 | #include <linux/errno.h> | |
32 | #include <asm/signal.h> | |
33 | ||
11d51360 MS |
34 | #undef DEBUG |
35 | ||
ca54502b MS |
/* The size of a state save frame: the pt_regs area plus the extra
 * argument space kept below it on the kernel stack. */
#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */

/* Emit a global, 4-byte-aligned entry-point label. */
#define C_ENTRY(name)	.globl name; .align 4; name
43 | ||
/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary using microblaze version that
 * allows msr ops to write to BIP
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	/* CPU provides msrset/msrclr: one instruction per MSR bit change;
	 * destination r0 discards the returned old MSR value. */
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	nop
	.endm

	/* NOTE(review): this variant sets UMS and clears VMS, but the
	 * no-MSR-instruction variant of set_ums in the #else branch below
	 * does the opposite (sets VMS, clears UMS).  The two branches
	 * disagree; confirm which behaviour is intended before relying
	 * on set_ums. */
	.macro	set_ums
	msrset	r0, MSR_UMS
	nop
	msrclr	r0, MSR_VMS
	nop
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	nop
	msrset	r0, MSR_VMS
	nop
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	nop
	.endm
#else
	/* No msrset/msrclr instructions: emulate with a read-modify-write
	 * of rmsr.  All of these clobber r11. */
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	/* NOTE(review): this body is identical to set_vms below (sets VMS,
	 * clears UMS), while the MSR-instruction variant of set_ums above
	 * sets UMS and clears VMS.  One of the two looks wrong; confirm
	 * intent before using set_ums on a no-MSR-instruction core. */
	.macro	set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, MSR_UMS
	mts	rmsr,r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr,r11
	nop
	.endm
#endif
186 | ||
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
/* NOTE(review): rted reloads MSR UM/VM from the UMS/VMS save bits, so
 * landing at 2: in virtual mode depends on set_ums leaving VMS set.
 * The two config variants of set_ums treat VMS differently -- verify
 * against the configuration actually in use. */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* turn off virtual protected mode save and user mode save*/
/* Clears both VMS and UMS, then rted drops to physical addressing at
 * the (TOPHYS-translated) label 1:. */
#define VM_OFF		\
	clear_vms_ums;	\
	rted	r0, TOPHYS(1f);	\
	nop; \
1:
ca54502b MS |
205 | |
/* Store the GPRs and MSR into the pt_regs frame at r1+PTO.  r1 must
 * already point at the new state-save frame; r1 itself (PT_R1) and
 * PT_MODE are stored separately by each entry path.  r16/r17 are not
 * saved here -- the HW-exception paths store r17 explicitly. */
#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r3, r1, PTO+PT_R3;					\
	swi	r4, r1, PTO+PT_R4;					\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;
238 | ||
/* Inverse of SAVE_REGS: reload MSR first, then the GPRs, from the
 * pt_regs frame at r1+PTO.  r1 itself is restored by the caller
 * after this macro (from PT_R1 or by adjusting the frame). */
#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr , r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PTO+PT_R3;					\
	lwi	r4, r1, PTO+PT_R4;					\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */
271 | ||
e5d2af2b MS |
/* Common HW-exception entry: stash r1 in ENTRY_SP, select the proper
 * kernel stack (the current one if the exception came from kernel mode,
 * the task's kernel stack if from user mode), carve out a state-save
 * frame and dump registers into it.  On exit: r1 = physical frame
 * pointer, CURRENT_TASK reloaded, PT_MODE = 0 for user entry and
 * nonzero (the saved SP) for kernel entry. */
#define SAVE_STATE	    \
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode.*/				\
	mfs	r1, rmsr;						\
	nop;								\
	andi	r1, r1, MSR_UMS;					\
	bnei	r1, 1f;							\
	/* Kernel-mode state save.  */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r1);							\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	SAVE_REGS							\
	swi	r1, r1, PTO+PT_MODE; /* nonzero marks kernel entry */	\
	brid	2f;							\
	nop;				/* Fill delay slot */		\
1:	/* User-mode state save.  */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
	tophys(r1,r1);							\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	SAVE_REGS							\
	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode.  */		\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */		\
	/* MS: I am clearing UMS even in case when I come from kernel space */ \
	clear_ums;							\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
302 | ||
ca54502b MS |
	.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 */
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */

	/* Check MSR.UMS to learn which mode we were trapped from. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

	/* Kernel-mode state save - kernel execve */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

	/* User-mode state save.  */
1:
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
	/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS

	swi	r0, r1, PTO + PT_MODE;		/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number.  */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

	/* where the trap should return need -8 to adjust for rtsd r15, 8*/
	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid. The LP
	 * register should point to the location where
	 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */

	# Step into virtual mode.
	set_vms;
	addik	r11, r0, 3f
	rtid	r11, 0
	nop
3:
	/* Syscall tracing: if any _TIF_WORK_SYSCALL_MASK flag is set,
	 * call do_syscall_trace_enter (which may rewrite the syscall nr)
	 * and reload the argument registers from the saved frame. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0	/* (delay slot) arg: &regs->r0 */

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid.
	 * The LP register should point to the location where the called function
	 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11,5f;
	/* Figure out which function to use for this system call.  */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;			/* convert num -> ptr */
	add	r12, r12, r12;			/* (num * 4 via two adds) */

#ifdef DEBUG
	/* Trace syscalls and store counts to r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error.  */
5:
	addi	r3, r0, -ENOSYS;
	rtsd	r15,8;		/* looks like a normal subroutine return */
	or	r0, r0, r0	/* delay-slot nop */
418 | ||
419 | ||
/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PTO + PT_R3	/* save syscall return value pair */
	swi	r4, r1, PTO + PT_R4

	lwi	r11, r1, PTO + PT_MODE;
	/* See if returning to kernel mode, if so, skip resched &c.  */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f			/* no trace work -> skip */

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0	/* (delay slot) arg: &regs->r0 */
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;		/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task*/
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending -> skip handling */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

	/* Finally, return to user state.  */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space.  */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
	bri	6f;

	/* Return to kernel state.  */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space.  */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
484 | ||
485 | ||
/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway).  */

/* fork(): tail-call do_fork(SIGCHLD, parent_sp, regs, 0, 0, 0).
 * r1+PTO is the pt_regs frame built by the trap entry path. */
C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	addik	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;
498 | ||
/* This is the initial entry point for a new child thread, with an
   appropriate stack in place that makes it look like the child is in the
   middle of a syscall.  This function is actually `returned to' from
   switch_thread (copy_thread makes ret_from_fork the return address in
   each new thread's saved context).  */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;
511 | ||
/* vfork(): tail-call the C helper with the pt_regs frame as its argument. */
C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	addik	r5, r1, PTO		/* (delay slot) arg: struct pt_regs * */
ca54502b | 515 | |
e513588f | 516 | C_ENTRY(sys_clone): |
ca54502b | 517 | bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */ |
570e3e23 | 518 | lwi r6, r1, PTO + PT_R1; /* If so, use paret's stack ptr */ |
b9ea77e2 MS |
519 | 1: addik r7, r1, PTO; /* Arg 2: parent context */ |
520 | add r8, r0, r0; /* Arg 3: (unused) */ | |
521 | add r9, r0, r0; /* Arg 4: (unused) */ | |
522 | add r10, r0, r0; /* Arg 5: (unused) */ | |
523 | brid do_fork /* Do real work (tail-call) */ | |
524 | nop; | |
ca54502b | 525 | |
e513588f | 526 | C_ENTRY(sys_execve): |
b9ea77e2 | 527 | addik r8, r1, PTO; /* add user context as 4th arg */ |
e513588f | 528 | brid microblaze_execve; /* Do real work (tail-call).*/ |
ca54502b MS |
529 | nop; |
530 | ||
ca54502b MS |
/* rt_sigreturn(): r3/r4 were saved into the frame by ret_from_trap's
 * caller path; sync them into the frame, let the C handler rebuild the
 * register state from the signal frame, then reload r3/r4 and take the
 * normal trap-return path. */
C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	addik	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri	ret_from_trap /* fall through will not work here due to align */
	nop;
541 | ||
/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * for finding where is it */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* FIXME this can be store directly in PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, resr		/* parameter: exception status reg */
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	mts	rfsr, r0;		/* Clear sticky fsr */
	nop
	/* rted enters virtual mode (per VMS set up by SAVE_STATE path)
	 * and jumps to the C handler; return goes via ret_from_exc. */
	rted	r0, full_exception
	nop
ca54502b MS |
566 | |
/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save r11 value and then restore it because
	 * set_bip, clear_eip, set_ee use r11 as temp register if MSR
	 * instructions are not used. We don't need to do if MSR instructions
	 * are used and they use r0 instead of r11.
	 * I am using ENTRY_SP which should be primary used only for stack
	 * pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	rtbd	r0, _unaligned_data_exception
	addik	r7, r1, PTO	/* (delay slot) parameter struct pt_regs * regs */
ca54502b MS |
601 | |
/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 *	void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it was is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	rted	r0, do_page_fault
	nop
ca54502b MS |
634 | |
/* Instruction-side page fault: same frame as the data fault above, but
 * the faulting address comes from EAR and error_code is forced to 0. */
C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	rted	r0, do_page_fault
	ori	r7, r0, 0	/* (delay slot) parameter unsigned long error_code */
ca54502b MS |
647 | |
/* Entry point used to return from an exception.  */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c.  */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending -> skip handling */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

	/* Finally, return to user state.  */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
	/* Return to kernel state.  */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space.  */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */
714 | ||
/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	swi	r1, r1, PTO + PT_MODE; /* 0 - user mode, nonzero - kernel mode */
	brid	2f;
	nop; /* MS: Fill delay slot */

1:
/* User-mode state save. */
 /* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik	r5, r1, PTO;	/* arg for do_IRQ: struct pt_regs * */
	set_vms;
	addik	r11, r0, do_IRQ;
	addik	r15, r0, irq_call;	/* return lands on ret_from_irq */
irq_call:rtbd	r11, 0;
	nop;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* kernel mode -> maybe preempt */

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	addik	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
    /* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;	/* preemption disabled -> plain restore */

	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

preempt:
	/* interrupts are off, that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt /* if non zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop
832 | ||
/*
 * `Debug' trap
 *  We enter dbtrap in "BIP" (breakpoint) mode.
 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
 *  original dbtrap.
 *  however, wait to save state first
 *
 *  Saves a pt_regs frame on the kernel stack (choosing the stack by the
 *  MSR user-mode bit), delivers SIGTRAP to the current task via send_sig(),
 *  then restores state and returns with 'rtbd'.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS		/* were we in user mode? */
	bnei	r1, 1f
	/* Kernel-mode state save.  */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS;

	swi	r1, r1, PTO + PT_MODE;	/* non-zero PT_MODE = kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */
1:	/* User-mode state save.  */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS;

	swi	r0, r1, PTO + PT_MODE;	/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;	/* Store user SP. */
2:
	tovirt(r1,r1)

	set_vms;			/* back to virtual mode for the C call */
	addi	r5, r0, SIGTRAP		/* Arg 1: send the trap signal */
	add	r6, r0, CURRENT_TASK;	/* Arg 2: current task pointer */
	addk	r7, r0, r0		/* 3rd param zero */
	/* 'rtbd' clears BIP while branching into send_sig(); r15 is set to
	 * dbtrap_call so send_sig() returns right below (r15 + 8 convention) */
dbtrap_call:	rtbd	r0, send_sig;
	addik	r15, r0, dbtrap_call;	/* delay slot */

	set_bip;			/*  Ints masked for state restore*/
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* non-zero -> return to kernel state */

	/* User-mode return: check for reschedule / pending signals first */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no pending signals -> restore */

	/* Handle a signal return; Pending signals should be in r18.  */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state.  Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns.  */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset; delay slot */


	/* Finally, return to user state.  */
1:
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space.  */


	lwi	r1, r1, PT_R1 - PT_SIZE;
					/* Restore user stack pointer. */
	bri	6f;

	/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space.  */

	tovirt(r1,r1);
6:
DBTRAP_return:	/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from a debug trap */
	nop;
945 | ||
946 | ||
947 | ||
/*
 * Context switch.
 * In: r5 = previous task's thread_info, r6 = next task's thread_info
 *     (MicroBlaze ABI first two argument registers; both are used with
 *     TI_CPU_CONTEXT / TI_TASK offsets below).
 * Out: r3 = previous 'current' task pointer (return value for the caller).
 * Saves the callee-visible register state into prev->cpu_context, switches
 * CURRENT_TASK (and the per-CPU CURRENT_SAVE copy) to the next task, then
 * restores next->cpu_context and returns into the new task via 'rtsd r15, 8'.
 * Volatile (caller-saved) registers are deliberately not saved here — they
 * were already saved on the stack when _switch_to() was called.
 */
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1		/* stack pointer */
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15	/* link register - resume point */
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	nop				/* mfs needs a following nop */
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31, the current task pointer, to the task which will be next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it to current_save too (per-CPU saved-current slot) */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* give me start where start context of next task */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15	/* link register - new resume point */
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1		/* new task's stack pointer */

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	rtsd	r15, 8			/* return into the next task */
	nop				/* delay slot */
1037 | ||
/* Software reset: transfer control back to the FS-BOOT bootloader entry. */
ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */
1040 | ||
/*
 * Non-maskable break/NMI handler: stash MSR and ESR into fixed r0_ram
 * scratch slots (0x250/0x254) for post-mortem inspection, then halt by
 * branching to self ('bri 0' is a relative branch with offset 0).
 */
ENTRY(_break)
	mfs	r5, rmsr
	nop				/* mfs needs a following nop */
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
	bri	0			/* spin forever */
1049 | ||
/* These are compiled and loaded into high memory, then
 * copied into place in mach_early_setup.
 * Interrupt vector table: each vector is a single 'brai' to the physical
 * address of its handler (vectors are spaced per the MicroBlaze layout:
 * 0x00 reset, 0x08 user exception/syscall, 0x10 interrupt, 0x18 break/NMI,
 * 0x20 HW exception, 0x60 debug). */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in current MMU branch you don't care what is here - it is
	 * used from bootloader site - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_break);		/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
1066 | ||
/* The syscall dispatch table lives in read-only data; syscall_table.S
 * defines sys_call_table as a list of handler addresses. */
.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)	/* size in bytes, used for bounds checks */
1071 | ||
/* NUL-terminated trap-type names displayed by the stack unwinder
 * (referenced from microblaze_trap_handlers below). */
type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"	/* NOTE(review): leading space looks unintentional — confirm against unwinder output */
1080 | ||
/*
 * Trap decoding for stack unwinder
 * Tuples are (start addr, end addr, string)
 * If return address lies on [start addr, end addr],
 * unwinder displays 'string'
 */

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq  ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return     ; .word type_SYSCALL_PREEMPT
	/* End of table: all-zero sentinel tuple */
	.word 0               ; .word 0               ; .word 0