microblaze: Simplify syscall routine
[deliverable/linux.git] / arch / microblaze / kernel / entry.S
CommitLineData
ca54502b
MS
1/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
11d51360
MS
34#undef DEBUG
35
ca54502b
MS
/* The size of a state save frame: the pt_regs area plus the argument
 * spill space kept below it. */
#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */

/* Emit a globally visible, aligned entry-point label. */
#define C_ENTRY(name)	.globl name; .align 4; name
43
/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary using microblaze version that
 * allows msr ops to write to BIP
 *
 * Two implementations are provided: one using the optional
 * msrset/msrclr instructions (write MSR bits directly, r0 destination
 * discards the old value), and a fallback that round-trips the MSR
 * through r11 with mfs/mts.  The fallback variants therefore clobber
 * r11; the msrset/msrclr variants clobber nothing.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	nop
	.endm

	.macro	set_ums
	msrset	r0, MSR_UMS
	nop
	msrclr	r0, MSR_VMS
	nop
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	nop
	msrset	r0, MSR_VMS
	nop
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	nop
	.endm
#else
	/* Fallback variants: read-modify-write MSR via r11 (clobbered). */
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	/* NOTE(review): this fallback set_ums performs the same bit edits
	 * as set_vms below (sets VMS, clears UMS), whereas the
	 * msrset/msrclr variant above sets UMS and clears VMS.  The two
	 * variants disagree — verify which UMS/VMS combination VM_ON is
	 * meant to install before relying on the non-MSR configuration. */
	.macro	set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, MSR_UMS
	mts	rmsr,r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr,r11
	nop
	.endm
#endif
186
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 *
 * Both macros program the UMS/VMS save bits and then execute an
 * rted to the very next instruction so the new mode takes effect.
 */

/* turn on virtual protected mode save */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* turn off virtual protected mode save and user mode save*/
#define VM_OFF		\
	clear_vms_ums;	\
	rted	r0, TOPHYS(1f); \
	nop; \
1:
ca54502b
MS
205
/* Store every register of the interrupted context into the pt_regs
 * area at PTO(r1).  r1 itself and PT_MODE are stored separately by each
 * caller.  r11 is saved to its slot first and then reused as scratch to
 * capture the MSR. */
#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r3, r1, PTO+PT_R3;					\
	swi	r4, r1, PTO+PT_R4;					\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;
238
/* Inverse of SAVE_REGS: reload MSR first (via r11), then every general
 * register from the pt_regs area at PTO(r1).  r1 itself is restored by
 * the caller after the frame is popped. */
#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr , r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PTO+PT_R3;					\
	lwi	r4, r1, PTO+PT_R4;					\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */
271
e5d2af2b
MS
/* Build a state-save frame for an exception that may arrive in either
 * kernel or user mode: pick the proper physical stack, run SAVE_REGS,
 * and record PT_MODE (non-zero = was in kernel, 0 = was in user mode).
 * Ends with CURRENT_TASK reloaded from the per-CPU CURRENT_SAVE slot. */
#define SAVE_STATE	    \
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode.*/				\
	mfs	r1, rmsr;						\
	nop;								\
	andi	r1, r1, MSR_UMS;					\
	bnei	r1, 1f;							\
	/* Kernel-mode state save. */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	/* FIXME: I can add these two lines to one */			\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -STATE_SAVE_SIZE; */			\
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
	SAVE_REGS							\
	brid	2f;							\
	swi	r1, r1, PTO+PT_MODE; /* non-zero SP marks kernel mode (delay slot) */ \
1:	/* User-mode state save.  */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	/* MS these three instructions can be added to one */		\
	/* addik	r1, r1, THREAD_SIZE; */				\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -STATE_SAVE_SIZE; */			\
	addik	r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
	SAVE_REGS							\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */		\
	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode. */		\
	/* MS: I am clearing UMS even in case when I come from kernel space */ \
	clear_ums;							\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
305
ca54502b
MS
.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 */
C_ENTRY(_user_exception):
	addi	r14, r14, 4	/* return address is 4 byte after call */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */

	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
	/* MS these three instructions can be added to one */
	/* addik	r1, r1, THREAD_SIZE; */
	/* tophys(r1,r1); */
	/* addik	r1, r1, -STATE_SAVE_SIZE; */
	addik	r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
	SAVE_REGS

	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;	/* Store user SP.  */
	clear_ums;
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number.  */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* where the trap should return need -8 to adjust for rtsd r15, 8 */
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.  The LP
 * register should point to the location where
 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */

	/* Step into virtual mode */
	rtbd	r0, 3f
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f			/* no tracing work -> dispatch directly */

	/* Pre-load -ENOSYS as the return value in case the tracer
	 * cancels the syscall. */
	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0	/* arg: &regs->r0 (delay slot) */

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;	/* reload syscall args clobbered above */
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call.  */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;		/* convert num -> ptr (num * 4) */
	add	r12, r12, r12;

#ifdef DEBUG
	/* Trace syscalls and store counts to r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error.  */
5:
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	addi	r3, r0, -ENOSYS;
ca54502b 400
/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PTO + PT_R3	/* preserve syscall return value */
	swi	r4, r1, PTO + PT_R4

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f			/* no syscall-exit tracing work */

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0	/* arg: &regs->r0 (delay slot) */
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task*/
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending -> restore state */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset (delay slot) */

/* Finally, return to user state.  */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	lwi	r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer. */
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
452
453
/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway).  */

/* Wrapper for fork(2): calls do_fork(SIGCHLD, parent_sp, regs, 0, 0, 0).
 * Tail-calls do_fork, so do_fork's return goes straight back through
 * our caller's r15. */
C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD		/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	addik	r7, r1, PTO		/* Arg 2: parent context */
	add	r8, r0, r0		/* Arg 3: (unused) */
					/* (fixed: original had "add r8. r0, r0" —
					 * a period instead of a comma after r8,
					 * which is a malformed operand list) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	brid	do_fork			/* Do real work (tail-call) */
	add	r10, r0, r0;		/* Arg 5: (unused) (delay slot) */
ca54502b
MS
465
/* This is the initial entry point for a new child thread, with an
   appropriate stack in place that makes it look like the child is in the
   middle of a syscall.  This function is actually `returned to' from
   switch_thread (copy_thread makes ret_from_fork the return address in
   each new thread's saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brid	ret_from_trap;	/* Do normal trap return */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
ca54502b 477
e513588f
AB
/* vfork(2): tail-call the C helper with regs as the only argument. */
C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	addik	r5, r1, PTO		/* Arg 1: struct pt_regs * (delay slot) */
ca54502b 481
/* clone(2): like fork_wrapper but flags (r5) and child SP (r6) come from
 * the caller; a zero child SP means "reuse the parent's stack". */
C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
1:	addik	r7, r1, PTO;		/* Arg 2: parent context */
	add	r8, r0, r0;		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	brid	do_fork			/* Do real work (tail-call) */
	add	r10, r0, r0;		/* Arg 5: (unused) (delay slot) */
ca54502b 490
/* execve(2): tail-call the C helper, appending regs as the 4th argument. */
C_ENTRY(sys_execve):
	brid	microblaze_execve;	/* Do real work (tail-call).*/
	addik	r8, r1, PTO;		/* add user context as 4th arg (delay slot) */
ca54502b 494
ca54502b
MS
/* rt_sigreturn(2): the handler rewrites the register frame, so stash the
 * live r3/r4 into pt_regs first and reload them afterwards. */
C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 into the pt_regs frame */
	swi	r4, r1, PTO+PT_R4;
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	addik	r5, r1, PTO;		/* add user context as 1st arg (delay slot) */
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri	ret_from_trap /* fall through will not work here due to align */
	nop;
504
/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * for finding where is it */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* FIXME this can be store directly in PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	mfs	r6, resr	/* arg 2: ESR */
	nop
	mfs	r7, rfsr;	/* arg 3: save FSR */
	nop
	mts	rfsr, r0;	/* Clear sticky fsr */
	nop
	rted	r0, full_exception	/* enter handler in virtual mode */
	addik	r5, r1, PTO	/* parameter struct pt_regs * regs (delay slot) */
ca54502b
MS
528
/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save r11 value and then restore it because
	 * set_bip, clear_eip, set_ee use r11 as temp register if MSR
	 * instructions are not used. We don't need to do if MSR instructions
	 * are used and they use r0 instead of r11.
	 * I am using ENTRY_SP which should be primary used only for stack
	 * pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr	/* ESR */
	nop
	mfs	r4, rear	/* EAR */
	nop
	rtbd	r0, _unaligned_data_exception	/* enter handler in virtual mode */
	addik	r7, r1, PTO	/* parameter struct pt_regs * regs (delay slot) */
ca54502b
MS
563
/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 *	void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it was is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear	/* parameter unsigned long address */
	nop
	mfs	r7, resr	/* parameter unsigned long error_code */
	nop
	rted	r0, do_page_fault	/* enter handler in virtual mode */
	addik	r5, r1, PTO	/* parameter struct pt_regs * regs (delay slot) */
ca54502b
MS
595
/* Instruction-side page fault: like the data trap but with a zero
 * error code. */
C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear	/* parameter unsigned long address */
	nop
	ori	r7, r0, 0	/* parameter unsigned long error_code */
	rted	r0, do_page_fault	/* enter handler in virtual mode */
	addik	r5, r1, PTO	/* parameter struct pt_regs * regs (delay slot) */
ca54502b
MS
608
/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c.  */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending -> restore state */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset (delay slot) */

/* Finally, return to user state.  */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state. */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */
675
/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	brid	2f;
	swi	r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode (delay slot) */
1:
/* User-mode state save. */
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;	/* store user SP */
	clear_ums;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik	r15, r0, irq_call;	/* do_IRQ returns to irq_call */
irq_call:rtbd	r0, do_IRQ;
	addik	r5, r1, PTO;		/* arg: struct pt_regs * (delay slot) */

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* returning to kernel mode -> 2f */

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall (delay slot) */
	addik	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset (delay slot) */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;	/* restore user stack pointer */
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;	/* preemption disabled -> plain restore */

	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

preempt:
	/* interrupts are off, that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt /* if non zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop
789
/*
 * `Debug' trap
 *  We enter dbtrap in "BIP" (breakpoint) mode.
 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
 *  original dbtrap.
 *  however, wait to save state first
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
	/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS;

	swi	r1, r1, PTO + PT_MODE;	/* non-zero SP marks kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */
1:	/* User-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS;

	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
2:
	tovirt(r1,r1)

	set_vms;
	addi	r5, r0, SIGTRAP		/* send the trap signal */
	add	r6, r0, CURRENT_TASK;	/* Arg 2: current task ptr */
	addk	r7, r0, r0		/* 3rd param zero */
dbtrap_call:	rtbd	r0, send_sig;	/* enter send_sig in virtual mode... */
	addik	r15, r0, dbtrap_call;	/* ...returning to dbtrap_call+8 (delay slot) */

	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* returning to kernel mode -> 2f */

	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending -> restore state */

/* Handle a signal return; Pending signals should be in r18. */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state.  Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns.  */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset (delay slot) */


/* Finally, return to user state. */
1:
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */


	lwi	r1, r1, PT_R1 - PT_SIZE;
					/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	tovirt(r1,r1);
6:
DBTRAP_return: /* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
902
903
904
/* Context switch: r5 = prev thread_info, r6 = next thread_info.
 * Saves callee-context of prev into its cpu_context, loads next's,
 * updates CURRENT_TASK and the per-CPU CURRENT_SAVE slot, and returns
 * the previous task pointer in r3. */
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	nop
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31, the current - give me pointer to task which will be next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* stored it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* give me start where start context of next task */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	rtsd	r15, 8
	nop
994
ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */
997
/* Non-maskable break/NMI vector target: dump MSR and ESR into fixed
 * r0_ram slots, then spin.
 * NOTE(review): 0x250/0x254 are magic r0_ram offsets — presumably a
 * debug scratch convention; verify against the r0_ram layout. */
ENTRY(_break)
	mfs	r5, rmsr
	nop
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
	bri	0	/* spin forever */
1006
	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in current MMU branch you don't care what is here - it is
	 * used from bootloader site - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_break);		/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
1023
.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

/* Strings displayed by the stack unwinder for each trap range below. */
type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for stack unwinder
	 * Tuples are (start addr, end addr, string)
	 * If return address lies on [start addr, end addr],
	 * unwinder displays 'string'
	 */

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq  ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return   ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0            ; .word 0             ; .word 0
This page took 0.184348 seconds and 5 git commands to generate.