microblaze: Fix VM_ON and VM_OFF macros
[deliverable/linux.git] / arch / microblaze / kernel / entry.S
CommitLineData
ca54502b
MS
1/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
11d51360
MS
34#undef DEBUG
35
ca54502b
MS
36/* The size of a state save frame. */
37#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
38
39/* The offset of the struct pt_regs in a `state save frame' on the stack. */
40#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
41
/* C_ENTRY(name): declare a global, 4-byte-aligned entry label for a
 * routine entered from (or returning to) C / trap vectors. */
42#define C_ENTRY(name) .globl name; .align 4; name
43
44/*
45 * Various ways of setting and clearing BIP in flags reg.
46 * This is mucky, but necessary using microblaze version that
47 * allows msr ops to write to BIP
48 */
/*
 * All of these macros clobber r11: in the msrclr/msrset forms it receives
 * the pre-modification MSR value; in the fallback forms it is the scratch
 * register used for the read-modify-write of rmsr.
 *
 * NOTE(review): in the USE_MSR_INSTR branch, set_ums sets UMS and clears
 * VMS, while in the fallback branch below set_ums sets VMS and clears UMS
 * (making it identical to set_vms there).  The two branches disagree --
 * confirm which semantics is intended before relying on set_ums.
 */
49#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
50 .macro clear_bip
51 msrclr r11, MSR_BIP
52 nop
53 .endm
54
55 .macro set_bip
56 msrset r11, MSR_BIP
57 nop
58 .endm
59
60 .macro clear_eip
61 msrclr r11, MSR_EIP
62 nop
63 .endm
64
65 .macro set_ee
66 msrset r11, MSR_EE
67 nop
68 .endm
69
70 .macro disable_irq
71 msrclr r11, MSR_IE
72 nop
73 .endm
74
75 .macro enable_irq
76 msrset r11, MSR_IE
77 nop
78 .endm
79
80 .macro set_ums
81 msrset r11, MSR_UMS
82 nop
83 msrclr r11, MSR_VMS
84 nop
85 .endm
86
87 .macro set_vms
88 msrclr r11, MSR_UMS
89 nop
90 msrset r11, MSR_VMS
91 nop
92 .endm
93
94 .macro clear_vms_ums
95 msrclr r11, MSR_VMS
96 nop
97 msrclr r11, MSR_UMS
98 nop
99 .endm
100#else
101 .macro clear_bip
102 mfs r11, rmsr
103 nop
104 andi r11, r11, ~MSR_BIP
105 mts rmsr, r11
106 nop
107 .endm
108
109 .macro set_bip
110 mfs r11, rmsr
111 nop
112 ori r11, r11, MSR_BIP
113 mts rmsr, r11
114 nop
115 .endm
116
117 .macro clear_eip
118 mfs r11, rmsr
119 nop
120 andi r11, r11, ~MSR_EIP
121 mts rmsr, r11
122 nop
123 .endm
124
125 .macro set_ee
126 mfs r11, rmsr
127 nop
128 ori r11, r11, MSR_EE
129 mts rmsr, r11
130 nop
131 .endm
132
133 .macro disable_irq
134 mfs r11, rmsr
135 nop
136 andi r11, r11, ~MSR_IE
137 mts rmsr, r11
138 nop
139 .endm
140
141 .macro enable_irq
142 mfs r11, rmsr
143 nop
144 ori r11, r11, MSR_IE
145 mts rmsr, r11
146 nop
147 .endm
148
/* NOTE(review): this set_ums is identical to set_vms below and differs
 * from the USE_MSR_INSTR set_ums above -- see header note. */
149 .macro set_ums
150 mfs r11, rmsr
151 nop
152 ori r11, r11, MSR_VMS
153 andni r11, r11, MSR_UMS
154 mts rmsr, r11
155 nop
156 .endm
157
158 .macro set_vms
159 mfs r11, rmsr
160 nop
161 ori r11, r11, MSR_VMS
162 andni r11, r11, MSR_UMS
163 mts rmsr, r11
164 nop
165 .endm
166
167 .macro clear_vms_ums
168 mfs r11, rmsr
169 nop
170 andni r11, r11, (MSR_VMS|MSR_UMS)
171 mts rmsr,r11
172 nop
173 .endm
174#endif
175
176/* Define how to call high-level functions. With MMU, virtual mode must be
177 * enabled when calling the high-level function. Clobbers R11.
178 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
179 */
180
181/* turn on virtual protected mode save; the rted to the local label makes the new MSR save-bit state take effect (clobbers r11) */
182#define VM_ON \
a4a94dbf 183 set_ums; \
ca54502b 184 rted r0, 2f; \
a4a94dbf
MS
185 nop; \
1862:
ca54502b
MS
187
188/* turn off virtual protected mode save and user mode save; continues at the physical address of the local label (clobbers r11) */
189#define VM_OFF \
a4a94dbf 190 clear_vms_ums; \
ca54502b 191 rted r0, TOPHYS(1f); \
a4a94dbf
MS
192 nop; \
1931:
ca54502b
MS
194
/* SAVE_REGS: spill r2, r5-r13, r14 (as PT_PC), r15, r18-r31 and MSR into
 * the pt_regs frame at r1+PTO.  r1, r3 and r4 are NOT stored here -- the
 * entry paths save them separately.  r11 is stored first and then reused
 * as scratch to read MSR. */
195#define SAVE_REGS \
196 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
197 swi r5, r1, PTO+PT_R5; \
198 swi r6, r1, PTO+PT_R6; \
199 swi r7, r1, PTO+PT_R7; \
200 swi r8, r1, PTO+PT_R8; \
201 swi r9, r1, PTO+PT_R9; \
202 swi r10, r1, PTO+PT_R10; \
203 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
204 swi r12, r1, PTO+PT_R12; \
205 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
206 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
207 swi r15, r1, PTO+PT_R15; /* Save LP */ \
208 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
209 swi r19, r1, PTO+PT_R19; \
210 swi r20, r1, PTO+PT_R20; \
211 swi r21, r1, PTO+PT_R21; \
212 swi r22, r1, PTO+PT_R22; \
213 swi r23, r1, PTO+PT_R23; \
214 swi r24, r1, PTO+PT_R24; \
215 swi r25, r1, PTO+PT_R25; \
216 swi r26, r1, PTO+PT_R26; \
217 swi r27, r1, PTO+PT_R27; \
218 swi r28, r1, PTO+PT_R28; \
219 swi r29, r1, PTO+PT_R29; \
220 swi r30, r1, PTO+PT_R30; \
221 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
222 mfs r11, rmsr; /* save MSR */ \
223 nop; \
224 swi r11, r1, PTO+PT_MSR;
225
/* RESTORE_REGS: inverse of SAVE_REGS -- write MSR back (via r11) and then
 * reload r2, r5-r13, r14 (from PT_PC), r15, r18-r31 from the pt_regs frame
 * at r1+PTO.  r1, r3 and r4 are NOT reloaded here; the return paths
 * restore them separately. */
226#define RESTORE_REGS \
227 lwi r11, r1, PTO+PT_MSR; \
228 mts rmsr , r11; \
229 nop; \
230 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
231 lwi r5, r1, PTO+PT_R5; \
232 lwi r6, r1, PTO+PT_R6; \
233 lwi r7, r1, PTO+PT_R7; \
234 lwi r8, r1, PTO+PT_R8; \
235 lwi r9, r1, PTO+PT_R9; \
236 lwi r10, r1, PTO+PT_R10; \
237 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
238 lwi r12, r1, PTO+PT_R12; \
239 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
240 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
241 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
242 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
243 lwi r19, r1, PTO+PT_R19; \
244 lwi r20, r1, PTO+PT_R20; \
245 lwi r21, r1, PTO+PT_R21; \
246 lwi r22, r1, PTO+PT_R22; \
247 lwi r23, r1, PTO+PT_R23; \
248 lwi r24, r1, PTO+PT_R24; \
249 lwi r25, r1, PTO+PT_R25; \
250 lwi r26, r1, PTO+PT_R26; \
251 lwi r27, r1, PTO+PT_R27; \
252 lwi r28, r1, PTO+PT_R28; \
253 lwi r29, r1, PTO+PT_R29; \
254 lwi r30, r1, PTO+PT_R30; \
255 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
256
257.text
258
259/*
260 * User trap.
261 *
262 * System calls are handled here.
263 *
264 * Syscall protocol:
265 * Syscall number in r12, args in r5-r10
266 * Return value in r3
267 *
268 * Trap entered via brki instruction, so BIP bit is set, and interrupts
269 * are masked. This is nice, means we don't have to CLI before state save
270 */
/* Flow: save state on the kernel stack (separate kernel-mode and user-mode
 * paths), step into virtual mode, optionally run the syscall tracer, then
 * dispatch through sys_call_table with the return path set to
 * ret_from_trap. */
271C_ENTRY(_user_exception):
272 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
273 addi r14, r14, 4 /* return address is 4 byte after call */
274 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
275
276 lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
277 beqi r11, 1f; /* Jump ahead if coming from user */
278/* Kernel-mode state save. */
279 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
280 tophys(r1,r11);
281 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
282 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
283
284 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
285 SAVE_REGS
286
287 addi r11, r0, 1; /* Was in kernel-mode. */
288 swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
289 brid 2f;
290 nop; /* Fill delay slot */
291
292/* User-mode state save. */
2931:
294 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
295 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
296 tophys(r1,r1);
297 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
298/* calculate kernel stack pointer from task struct 8k */
299 addik r1, r1, THREAD_SIZE;
300 tophys(r1,r1);
301
302 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
303 SAVE_REGS
304
305 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
306 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
307 swi r11, r1, PTO+PT_R1; /* Store user SP. */
308 addi r11, r0, 1;
309 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
b1d70c62 3102: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
ca54502b
MS
311 /* Save away the syscall number. */
312 swi r12, r1, PTO+PT_R0;
313 tovirt(r1,r1)
314
ca54502b
MS
315/* where the trap should return need -8 to adjust for rtsd r15, 8*/
316/* Jump to the appropriate function for the system call number in r12
317 * (r12 is not preserved), or return an error if r12 is not valid. The LP
318 * register should point to the location where
319 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
23575483
MS
320
321 # Step into virtual mode.
322 set_vms;
323 addik r11, r0, 3f
324 rtid r11, 0
325 nop
3263:
b1d70c62 327 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
23575483
MS
328 lwi r11, r11, TI_FLAGS /* get flags in thread info */
329 andi r11, r11, _TIF_WORK_SYSCALL_MASK
330 beqi r11, 4f
331
332 addik r3, r0, -ENOSYS
333 swi r3, r1, PTO + PT_R3
334 brlid r15, do_syscall_trace_enter
335 addik r5, r1, PTO + PT_R0
336
337 # do_syscall_trace_enter returns the new syscall nr.
338 addk r12, r0, r3
339 lwi r5, r1, PTO+PT_R5;
340 lwi r6, r1, PTO+PT_R6;
341 lwi r7, r1, PTO+PT_R7;
342 lwi r8, r1, PTO+PT_R8;
343 lwi r9, r1, PTO+PT_R9;
344 lwi r10, r1, PTO+PT_R10;
3454:
346/* Jump to the appropriate function for the system call number in r12
347 * (r12 is not preserved), or return an error if r12 is not valid.
348 * The LP register should point to the location where the called function
349 * should return. [note that MAKE_SYS_CALL uses label 1] */
350 /* See if the system call number is valid */
ca54502b 351 addi r11, r12, -__NR_syscalls;
23575483 352 bgei r11,5f;
ca54502b
MS
353 /* Figure out which function to use for this system call. */
354 /* Note Microblaze barrel shift is optional, so don't rely on it */
355 add r12, r12, r12; /* convert num -> ptr */
356 add r12, r12, r12;
357
11d51360 358#ifdef DEBUG
ca54502b 359 /* Trace syscalls and store counts to r0_ram */
23575483 360 lwi r3, r12, 0x400 + r0_ram
ca54502b 361 addi r3, r3, 1
23575483 362 swi r3, r12, 0x400 + r0_ram
11d51360 363#endif
23575483
MS
364
365 # Find and jump into the syscall handler.
366 lwi r12, r12, sys_call_table
367 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
368 la r15, r0, ret_from_trap-8
369 bra r12
ca54502b 370
ca54502b 371 /* The syscall number is invalid, return an error. */
23575483 3725:
ca54502b
MS
373 addi r3, r0, -ENOSYS;
374 rtsd r15,8; /* looks like a normal subroutine return */
375 or r0, r0, r0
376
377
23575483 378/* Entry point used to return from a syscall/trap */
ca54502b
MS
379/* We re-enable BIP bit before state restore */
380C_ENTRY(ret_from_trap):
381 set_bip; /* Ints masked for state restore*/
382 lwi r11, r1, PTO+PT_MODE;
383/* See if returning to kernel mode, if so, skip resched &c. */
384 bnei r11, 2f;
385
b1d70c62
MS
386 swi r3, r1, PTO + PT_R3
387 swi r4, r1, PTO + PT_R4
388
23575483
MS
389 /* We're returning to user mode, so check for various conditions that
390 * trigger rescheduling. */
b1d70c62
MS
391 /* FIXME: Restructure all these flag checks. */
392 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
23575483
MS
393 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
394 andi r11, r11, _TIF_WORK_SYSCALL_MASK
395 beqi r11, 1f
396
23575483
MS
397 brlid r15, do_syscall_trace_leave
398 addik r5, r1, PTO + PT_R0
23575483 3991:
ca54502b
MS
400 /* We're returning to user mode, so check for various conditions that
401 * trigger rescheduling. */
b1d70c62
MS
402 /* get thread info from current task */
403 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
ca54502b
MS
404 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
405 andi r11, r11, _TIF_NEED_RESCHED;
406 beqi r11, 5f;
407
ca54502b
MS
408 bralid r15, schedule; /* Call scheduler */
409 nop; /* delay slot */
ca54502b
MS
410
411 /* Maybe handle a signal */
b1d70c62
MS
4125: /* get thread info from current task*/
413 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
ca54502b
MS
414 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
415 andi r11, r11, _TIF_SIGPENDING;
416 beqi r11, 1f; /* No signals pending, skip signal handling */
417
ca54502b 418 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
ca54502b
MS
419 addi r7, r0, 1; /* Arg 3: int in_syscall */
420 bralid r15, do_signal; /* Handle any signals */
841d6e8c 421 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
b1d70c62
MS
422
423/* Finally, return to user state. */
4241:
ca54502b
MS
425 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
426 lwi r4, r1, PTO + PT_R4;
427
b1d70c62 428 swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
8633bebc 429 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
ca54502b
MS
430 VM_OFF;
431 tophys(r1,r1);
432 RESTORE_REGS;
433 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
434 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
435 bri 6f;
436
437/* Return to kernel state. */
4382: VM_OFF;
439 tophys(r1,r1);
440 RESTORE_REGS;
441 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
442 tovirt(r1,r1);
4436:
444TRAP_return: /* Make global symbol for debugging */
445 rtbd r14, 0; /* Instructions to return from an IRQ */
446 nop;
447
448
/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway). */

/* sys_fork_wrapper: marshal fork's fixed arguments and tail-call do_fork.
 * Bug fix: the original read "add r8. r0, r0" -- a period instead of a
 * comma between the destination and first source operand, which is not
 * valid assembler syntax.  Corrected to "add r8, r0, r0" (r8 = 0),
 * matching the other unused-argument lines below. */
C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	la	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;
461
462/* This is the initial entry point for a new child thread, with an appropriate
463 stack in place that makes it look like the child is in the middle of a
464 syscall. This function is actually `returned to' from switch_thread
465 (copy_thread makes ret_from_fork the return address in each new thread's
466 saved context). */
467C_ENTRY(ret_from_fork):
468 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
469 add r3, r5, r0; /* switch_thread returns the prev task */
470 /* ( in the delay slot ) */
471 add r3, r0, r0; /* Child's fork call should return 0. */
472 brid ret_from_trap; /* Do normal trap return */
473 nop;
474
e513588f
AB
/* vfork: tail-call microblaze_vfork with pt_regs (r1+PTO) as its argument
 * (loaded into r5 in the delay slot). */
475C_ENTRY(sys_vfork):
476 brid microblaze_vfork /* Do real work (tail-call) */
ca54502b 477 la r5, r1, PTO
ca54502b 478
/* clone: shift the user-visible args up one slot, insert pt_regs as the
 * third do_fork argument, and tail-call do_fork. */
e513588f 479C_ENTRY(sys_clone):
ca54502b 480 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
570e3e23
MS
481 lwi r6, r1, PTO + PT_R1; /* If so, use parent's stack ptr */
4821: add r10, r0, r9; /* Arg 6: (child_tidptr) */
483 add r9, r0, r8; /* Arg 5: (parent_tidptr) */
484 add r8, r0, r7; /* Arg 4: (stack_size) */
485 la r7, r1, PTO; /* Arg 3: pt_regs */
486 brid do_fork /* Do real work (tail-call) */
487 nop
ca54502b 488
/* execve: tail-call microblaze_execve, passing pt_regs (r1+PTO) as the
 * extra fourth argument in r8. */
e513588f 489C_ENTRY(sys_execve):
ca54502b 490 la r8, r1, PTO; /* add user context as 4th arg */
e513588f 491 brid microblaze_execve; /* Do real work (tail-call).*/
ca54502b
MS
492 nop;
493
ca54502b
MS
/* rt_sigreturn: r3/r4 are spilled to pt_regs before the call so the trap
 * return path can restore them after sys_rt_sigreturn rewrites the frame. */
494C_ENTRY(sys_rt_sigreturn_wrapper):
495 swi r3, r1, PTO+PT_R3; /* save r3, r4 registers into pt_regs */
496 swi r4, r1, PTO+PT_R4;
497 la r5, r1, PTO; /* add user context as 1st arg */
498 brlid r15, sys_rt_sigreturn /* Do real work */
499 nop;
500 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
501 lwi r4, r1, PTO+PT_R4;
502 bri ret_from_trap /* fall through will not work here due to align */
503 nop;
504
505/*
506 * HW EXCEPTION routine start
507 */
508
/* SAVE_STATE: common state-save sequence for hardware exception entries
 * (full exception, unaligned data, page faults).  Normalizes MSR (BIP set,
 * EIP cleared, IE and EE enabled), picks the kernel stack for the
 * kernel-mode or user-mode case, stores r3/r4 and the rest of the frame
 * (SAVE_REGS), and records r17 -- the exception return address -- as
 * PT_PC.  Ends with r1 holding the virtual address of the new frame. */
509#define SAVE_STATE \
510 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \
511 set_bip; /*equalize initial state for all possible entries*/\
512 clear_eip; \
513 enable_irq; \
514 set_ee; \
515 /* See if already in kernel mode.*/ \
516 lwi r11, r0, TOPHYS(PER_CPU(KM)); \
517 beqi r11, 1f; /* Jump ahead if coming from user */\
518 /* Kernel-mode state save. */ \
519 /* Reload kernel stack-ptr. */ \
520 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
521 tophys(r1,r11); \
522 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \
523 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
524 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
525 /* store return registers separately because \
526 * this macro is used for other exceptions */ \
527 swi r3, r1, PTO + PT_R3; \
528 swi r4, r1, PTO + PT_R4; \
529 SAVE_REGS \
530 /* PC, before IRQ/trap - this is one instruction above */ \
531 swi r17, r1, PTO+PT_PC; \
532 \
533 addi r11, r0, 1; /* Was in kernel-mode. */ \
534 swi r11, r1, PTO+PT_MODE; \
535 brid 2f; \
536 nop; /* Fill delay slot */ \
5371: /* User-mode state save. */ \
538 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
539 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
540 tophys(r1,r1); \
541 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
542 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
543 tophys(r1,r1); \
544 \
545 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
546 /* store return registers separately because this macro \
547 * is used for other exceptions */ \
548 swi r3, r1, PTO + PT_R3; \
549 swi r4, r1, PTO + PT_R4; \
550 SAVE_REGS \
551 /* PC, before IRQ/trap - this is one instruction above FIXME*/ \
552 swi r17, r1, PTO+PT_PC; \
553 \
554 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
555 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
556 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
557 addi r11, r0, 1; \
558 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
b1d70c62 5592: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
ca54502b
MS
560 /* Save away the syscall number. */ \
561 swi r0, r1, PTO+PT_R0; \
562 tovirt(r1,r1)
563
/* Dispatch a hardware exception to full_exception(regs, ESR, FSR) in
 * virtual mode.  r17 is backed up by 4 before SAVE_STATE so PT_PC points
 * at the faulting instruction; the sticky FSR is cleared after reading. */
564C_ENTRY(full_exception_trap):
565 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
566 /* adjust exception address for privileged instruction
567 * for finding where is it */
568 addik r17, r17, -4
569 SAVE_STATE /* Save registers */
570 /* FIXME this can be store directly in PT_ESR reg.
571 * I tested it but there is a fault */
572 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
573 la r15, r0, ret_from_exc - 8
574 la r5, r1, PTO /* parameter struct pt_regs * regs */
575 mfs r6, resr
576 nop
577 mfs r7, rfsr; /* save FSR */
578 nop
131e4e97
MS
579 mts rfsr, r0; /* Clear sticky fsr */
580 nop
ca54502b
MS
581 la r12, r0, full_exception
582 set_vms;
583 rtbd r12, 0;
584 nop;
585
586/*
587 * Unaligned data trap.
588 *
589 * Unaligned data trap last on 4k page is handled here.
590 *
591 * Trap entered via exception, so EE bit is set, and interrupts
592 * are masked. This is nice, means we don't have to CLI before state save
593 *
594 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
595 */
/* Passes ESR (r3), EAR (r4) and pt_regs (r7) to _unaligned_data_exception,
 * entered in virtual mode with interrupts enabled. */
596C_ENTRY(unaligned_data_trap):
597 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
598 SAVE_STATE /* Save registers.*/
599 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
600 la r15, r0, ret_from_exc-8
601 mfs r3, resr /* ESR */
602 nop
603 mfs r4, rear /* EAR */
604 nop
605 la r7, r1, PTO /* parameter struct pt_regs * regs */
606 la r12, r0, _unaligned_data_exception
607 set_vms;
608 rtbd r12, 0; /* interrupts enabled */
609 nop;
610
611/*
612 * Page fault traps.
613 *
614 * If the real exception handler (from hw_exception_handler.S) didn't find
615 * the mapping for the process, then we're thrown here to handle such situation.
616 *
617 * Trap entered via exceptions, so EE bit is set, and interrupts
618 * are masked. This is nice, means we don't have to CLI before state save
619 *
620 * Build a standard exception frame for TLB Access errors. All TLB exceptions
621 * will bail out to this point if they can't resolve the lightweight TLB fault.
622 *
623 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
624 * void do_page_fault(struct pt_regs *regs,
625 * unsigned long address,
626 * unsigned long error_code)
627 */
628/* data and instruction trap - which one was taken is resolved in fault.c */
629C_ENTRY(page_fault_data_trap):
630 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
631 SAVE_STATE /* Save registers.*/
632 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
633 la r15, r0, ret_from_exc-8
634 la r5, r1, PTO /* parameter struct pt_regs * regs */
635 mfs r6, rear /* parameter unsigned long address */
636 nop
637 mfs r7, resr /* parameter unsigned long error_code */
638 nop
639 la r12, r0, do_page_fault
640 set_vms;
641 rtbd r12, 0; /* interrupts enabled */
642 nop;
643
/* Instruction-side page fault: same as page_fault_data_trap except that
 * error_code is hard-wired to 0. */
644C_ENTRY(page_fault_instr_trap):
645 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
646 SAVE_STATE /* Save registers.*/
647 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
648 la r15, r0, ret_from_exc-8
649 la r5, r1, PTO /* parameter struct pt_regs * regs */
650 mfs r6, rear /* parameter unsigned long address */
651 nop
652 ori r7, r0, 0 /* parameter unsigned long error_code */
653 la r12, r0, do_page_fault
654 set_vms;
655 rtbd r12, 0; /* interrupts enabled */
656 nop;
657
658/* Entry point used to return from an exception. */
/* Mirrors ret_from_trap: for user-mode returns, run the scheduler and
 * signal checks first; r3/r4 were stored separately by the entry paths
 * and are restored here before RESTORE_REGS. */
659C_ENTRY(ret_from_exc):
660 set_bip; /* Ints masked for state restore*/
661 lwi r11, r1, PTO+PT_MODE;
662 bnei r11, 2f; /* See if returning to kernel mode, */
663 /* ... if so, skip resched &c. */
664
665 /* We're returning to user mode, so check for various conditions that
666 trigger rescheduling. */
b1d70c62 667 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
ca54502b
MS
668 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
669 andi r11, r11, _TIF_NEED_RESCHED;
670 beqi r11, 5f;
671
672/* Call the scheduler before returning from a syscall/trap. */
673 bralid r15, schedule; /* Call scheduler */
674 nop; /* delay slot */
675
676 /* Maybe handle a signal */
b1d70c62 6775: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
ca54502b
MS
678 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
679 andi r11, r11, _TIF_SIGPENDING;
680 beqi r11, 1f; /* No signals pending, skip signal handling */
681
682 /*
683 * Handle a signal return; Pending signals should be in r18.
684 *
685 * Not all registers are saved by the normal trap/interrupt entry
686 * points (for instance, call-saved registers (because the normal
687 * C-compiler calling sequence in the kernel makes sure they're
688 * preserved), and call-clobbered registers in the case of
689 * traps), but signal handlers may want to examine or change the
690 * complete register state. Here we save anything not saved by
691 * the normal entry sequence, so that it may be safely restored
692 * (in a possibly modified form) after do_signal returns.
693 * store return registers separately because this macro is used
694 * for other exceptions */
ca54502b 695 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
ca54502b
MS
696 addi r7, r0, 0; /* Arg 3: int in_syscall */
697 bralid r15, do_signal; /* Handle any signals */
841d6e8c 698 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
ca54502b
MS
699
700/* Finally, return to user state. */
7011: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
8633bebc 702 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
ca54502b
MS
703 VM_OFF;
704 tophys(r1,r1);
705
706 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
707 lwi r4, r1, PTO+PT_R4;
708 RESTORE_REGS;
709 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
710
711 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
712 bri 6f;
713/* Return to kernel state. */
7142: VM_OFF;
715 tophys(r1,r1);
716 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
717 lwi r4, r1, PTO+PT_R4;
718 RESTORE_REGS;
719 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
720
721 tovirt(r1,r1);
7226:
723EXC_return: /* Make global symbol for debugging */
724 rtbd r14, 0; /* Instructions to return from an IRQ */
725 nop;
726
727/*
728 * HW EXCEPTION routine end
729 */
730
731/*
732 * Hardware maskable interrupts.
733 *
734 * The stack-pointer (r1) should have already been saved to the memory
735 * location PER_CPU(ENTRY_SP).
736 */
/* Flow: save state (kernel- or user-mode path), call do_IRQ(regs) in
 * virtual mode, then on return run resched/signal checks (user return)
 * or the CONFIG_PREEMPT kernel-preemption check before restoring. */
737C_ENTRY(_interrupt):
738/* MS: we are in physical address */
739/* Save registers, switch to proper stack, convert SP to virtual.*/
740 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
741 swi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
742 /* MS: See if already in kernel mode. */
743 lwi r11, r0, TOPHYS(PER_CPU(KM));
744 beqi r11, 1f; /* MS: Jump ahead if coming from user */
745
746/* Kernel-mode state save. */
747 or r11, r1, r0
748 tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
749/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
750 swi r11, r1, (PT_R1 - PT_SIZE);
751/* MS: restore r11 because of saving in SAVE_REGS */
752 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
753 /* save registers */
754/* MS: Make room on the stack -> activation record */
755 addik r1, r1, -STATE_SAVE_SIZE;
756/* MS: store return registers separately because
757 * this macro is used for other exceptions */
758 swi r3, r1, PTO + PT_R3;
759 swi r4, r1, PTO + PT_R4;
760 SAVE_REGS
761 /* MS: store mode */
762 addi r11, r0, 1; /* MS: Was in kernel-mode. */
763 swi r11, r1, PTO + PT_MODE; /* MS: and save it */
764 brid 2f;
765 nop; /* MS: Fill delay slot */
766
7671:
768/* User-mode state save. */
769/* MS: restore r11 -> FIXME move before SAVE_REG */
770 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
771 /* MS: get the saved current */
772 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
773 tophys(r1,r1);
774 lwi r1, r1, TS_THREAD_INFO;
775 addik r1, r1, THREAD_SIZE;
776 tophys(r1,r1);
777 /* save registers */
778 addik r1, r1, -STATE_SAVE_SIZE;
779 swi r3, r1, PTO+PT_R3;
780 swi r4, r1, PTO+PT_R4;
781 SAVE_REGS
782 /* calculate mode */
783 swi r0, r1, PTO + PT_MODE;
784 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
785 swi r11, r1, PTO+PT_R1;
786 /* setup kernel mode to KM */
787 addi r11, r0, 1;
788 swi r11, r0, TOPHYS(PER_CPU(KM));
789
7902:
b1d70c62 791 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
ca54502b
MS
792 swi r0, r1, PTO + PT_R0;
793 tovirt(r1,r1)
794 la r5, r1, PTO;
795 set_vms;
796 la r11, r0, do_IRQ;
797 la r15, r0, irq_call;
798irq_call:rtbd r11, 0;
799 nop;
800
801/* MS: we are in virtual mode */
802ret_from_irq:
803 lwi r11, r1, PTO + PT_MODE;
804 bnei r11, 2f;
805
b1d70c62 806 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
ca54502b
MS
807 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
808 andi r11, r11, _TIF_NEED_RESCHED;
809 beqi r11, 5f
810 bralid r15, schedule;
811 nop; /* delay slot */
812
813 /* Maybe handle a signal */
b1d70c62 8145: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
ca54502b
MS
815 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
816 andi r11, r11, _TIF_SIGPENDING;
817 beqid r11, no_intr_resched
818/* Handle a signal return; Pending signals should be in r18. */
819 addi r7, r0, 0; /* Arg 3: int in_syscall */
820 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
821 bralid r15, do_signal; /* Handle any signals */
822 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
823
824/* Finally, return to user state. */
825no_intr_resched:
826 /* Disable interrupts, we are now committed to the state restore */
827 disable_irq
828 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
8633bebc 829 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
ca54502b
MS
830 VM_OFF;
831 tophys(r1,r1);
832 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
833 lwi r4, r1, PTO + PT_R4;
834 RESTORE_REGS
835 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
836 lwi r1, r1, PT_R1 - PT_SIZE;
837 bri 6f;
838/* MS: Return to kernel state. */
77753790
MS
8392:
840#ifdef CONFIG_PREEMPT
b1d70c62 841 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
77753790
MS
842 /* MS: get preempt_count from thread info */
843 lwi r5, r11, TI_PREEMPT_COUNT;
844 bgti r5, restore;
845
846 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
847 andi r5, r5, _TIF_NEED_RESCHED;
848 beqi r5, restore /* if zero jump over */
849
850preempt:
851 /* interrupts are off that's why I am calling preempt_schedule_irq */
852 bralid r15, preempt_schedule_irq
853 nop
b1d70c62 854 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
77753790
MS
855 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
856 andi r5, r5, _TIF_NEED_RESCHED;
857 bnei r5, preempt /* if non zero jump to resched */
858restore:
859#endif
860 VM_OFF /* MS: turn off MMU */
ca54502b
MS
861 tophys(r1,r1)
862 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
863 lwi r4, r1, PTO + PT_R4;
864 RESTORE_REGS
865 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
866 tovirt(r1,r1);
8676:
868IRQ_return: /* MS: Make global symbol for debugging */
869 rtid r14, 0
870 nop
871
872/*
873 * `Debug' trap
874 * We enter dbtrap in "BIP" (breakpoint) mode.
875 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
876 * original dbtrap.
877 * however, wait to save state first
878 */
/* Flow: save state (kernel- or user-mode path), call
 * send_sig(SIGTRAP, current, 0) in virtual mode, then run the usual
 * resched/signal checks before restoring state. */
879C_ENTRY(_debug_exception):
880 /* BIP bit is set on entry, no interrupts can occur */
881 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
882
883 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
884 set_bip; /*equalize initial state for all possible entries*/
885 clear_eip;
886 enable_irq;
887 lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
888 beqi r11, 1f; /* Jump ahead if coming from user */
889 /* Kernel-mode state save. */
890 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
891 tophys(r1,r11);
892 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
893 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
894
895 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
896 swi r3, r1, PTO + PT_R3;
897 swi r4, r1, PTO + PT_R4;
898 SAVE_REGS;
899
900 addi r11, r0, 1; /* Was in kernel-mode. */
901 swi r11, r1, PTO + PT_MODE;
902 brid 2f;
903 nop; /* Fill delay slot */
9041: /* User-mode state save. */
905 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
906 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
907 tophys(r1,r1);
908 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
909 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
910 tophys(r1,r1);
911
912 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
913 swi r3, r1, PTO + PT_R3;
914 swi r4, r1, PTO + PT_R4;
915 SAVE_REGS;
916
917 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
918 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
919 swi r11, r1, PTO+PT_R1; /* Store user SP. */
920 addi r11, r0, 1;
921 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
b1d70c62 9222: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
ca54502b
MS
923 /* Save away the syscall number. */
924 swi r0, r1, PTO+PT_R0;
925 tovirt(r1,r1)
926
927 addi r5, r0, SIGTRAP /* send the trap signal */
928 add r6, r0, CURRENT_TASK; /* Arg 2: current task */
929 addk r7, r0, r0 /* 3rd param zero */
930
931 set_vms;
932 la r11, r0, send_sig;
933 la r15, r0, dbtrap_call;
934dbtrap_call: rtbd r11, 0;
935 nop;
936
937 set_bip; /* Ints masked for state restore*/
938 lwi r11, r1, PTO+PT_MODE;
939 bnei r11, 2f;
940
941 /* Get current task ptr into r11 */
b1d70c62 942 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
ca54502b
MS
943 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
944 andi r11, r11, _TIF_NEED_RESCHED;
945 beqi r11, 5f;
946
947/* Call the scheduler before returning from a syscall/trap. */
948
949 bralid r15, schedule; /* Call scheduler */
950 nop; /* delay slot */
951 /* XXX Is PT_DTRACE handling needed here? */
952 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
953
954 /* Maybe handle a signal */
b1d70c62 9555: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
ca54502b
MS
956 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
957 andi r11, r11, _TIF_SIGPENDING;
958 beqi r11, 1f; /* No signals pending, skip signal handling */
959
960/* Handle a signal return; Pending signals should be in r18. */
961 /* Not all registers are saved by the normal trap/interrupt entry
962 points (for instance, call-saved registers (because the normal
963 C-compiler calling sequence in the kernel makes sure they're
964 preserved), and call-clobbered registers in the case of
965 traps), but signal handlers may want to examine or change the
966 complete register state. Here we save anything not saved by
967 the normal entry sequence, so that it may be safely restored
968 (in a possibly modified form) after do_signal returns. */
969
970 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
ca54502b
MS
971 addi r7, r0, 0; /* Arg 3: int in_syscall */
972 bralid r15, do_signal; /* Handle any signals */
841d6e8c 973 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
ca54502b
MS
974
975
976/* Finally, return to user state. */
9771: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
8633bebc 978 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
ca54502b
MS
979 VM_OFF;
980 tophys(r1,r1);
981
982 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
983 lwi r4, r1, PTO+PT_R4;
984 RESTORE_REGS
985 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
986
987
988 lwi r1, r1, PT_R1 - PT_SIZE;
989 /* Restore user stack pointer. */
990 bri 6f;
991
992/* Return to kernel state. */
9932: VM_OFF;
994 tophys(r1,r1);
995 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
996 lwi r4, r1, PTO+PT_R4;
997 RESTORE_REGS
998 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
999
1000 tovirt(r1,r1);
10016:
1002DBTRAP_return: /* Make global symbol for debugging */
1003 rtbd r14, 0; /* Instructions to return from an IRQ */
1004 nop;
1005
1006
1007
1008ENTRY(_switch_to)
	/*
	 * Context switch between two tasks.
	 *   r5 = thread_info of the outgoing (previous) task
	 *   r6 = thread_info of the incoming (next) task
	 * Returns the outgoing CURRENT_TASK pointer in r3 (C return reg).
	 * Only callee-saved/dedicated registers are switched here; the
	 * volatile registers were already saved on the stack by the caller
	 * (see the "skip volatile registers" comment below).
	 */
 1009	/* prepare return value */
b1d70c62 1010	addk	r3, r0, CURRENT_TASK
ca54502b
MS
 1011
 1012	/* save registers in cpu_context */
 1013	/* use r11 and r12, volatile registers, as temp register */
 1014	/* give start of cpu_context for previous process */
 1015	addik	r11, r5, TI_CPU_CONTEXT
 1016	swi	r1, r11, CC_R1
 1017	swi	r2, r11, CC_R2
 1018	/* skip volatile registers.
 1019	 * they are saved on stack when we jumped to _switch_to() */
 1020	/* dedicated registers */
 1021	swi	r13, r11, CC_R13
 1022	swi	r14, r11, CC_R14
 1023	swi	r15, r11, CC_R15
 1024	swi	r16, r11, CC_R16
 1025	swi	r17, r11, CC_R17
 1026	swi	r18, r11, CC_R18
 1027	/* save non-volatile registers */
 1028	swi	r19, r11, CC_R19
 1029	swi	r20, r11, CC_R20
 1030	swi	r21, r11, CC_R21
 1031	swi	r22, r11, CC_R22
 1032	swi	r23, r11, CC_R23
 1033	swi	r24, r11, CC_R24
 1034	swi	r25, r11, CC_R25
 1035	swi	r26, r11, CC_R26
 1036	swi	r27, r11, CC_R27
 1037	swi	r28, r11, CC_R28
 1038	swi	r29, r11, CC_R29
 1039	swi	r30, r11, CC_R30
 1040	/* special purpose registers */
 1041	mfs	r12, rmsr
 1042	nop
 1043	swi	r12, r11, CC_MSR
 1044	mfs	r12, rear
 1045	nop
 1046	swi	r12, r11, CC_EAR
 1047	mfs	r12, resr
 1048	nop
 1049	swi	r12, r11, CC_ESR
 1050	mfs	r12, rfsr
 1051	nop
 1052	swi	r12, r11, CC_FSR
	/* NOTE(review): EAR and ESR are snapshotted here but the restore
	 * path below only reloads FSR and MSR - presumably EAR/ESR are
	 * hardware-written exception status only; confirm before relying
	 * on them after a switch. */
 1053
b1d70c62
MS
 1054	/* update r31, the current-give me pointer to task which will be next */
 1055	lwi	CURRENT_TASK, r6, TI_TASK
ca54502b 1056	/* stored it to current_save too */
b1d70c62 1057	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
ca54502b
MS
 1058
 1059	/* get new process' cpu context and restore */
 1060	/* give me start where start context of next task */
 1061	addik	r11, r6, TI_CPU_CONTEXT
 1062
 1063	/* non-volatile registers */
 1064	lwi	r30, r11, CC_R30
 1065	lwi	r29, r11, CC_R29
 1066	lwi	r28, r11, CC_R28
 1067	lwi	r27, r11, CC_R27
 1068	lwi	r26, r11, CC_R26
 1069	lwi	r25, r11, CC_R25
 1070	lwi	r24, r11, CC_R24
 1071	lwi	r23, r11, CC_R23
 1072	lwi	r22, r11, CC_R22
 1073	lwi	r21, r11, CC_R21
 1074	lwi	r20, r11, CC_R20
 1075	lwi	r19, r11, CC_R19
 1076	/* dedicated registers */
 1077	lwi	r18, r11, CC_R18
 1078	lwi	r17, r11, CC_R17
 1079	lwi	r16, r11, CC_R16
 1080	lwi	r15, r11, CC_R15
 1081	lwi	r14, r11, CC_R14
 1082	lwi	r13, r11, CC_R13
 1083	/* skip volatile registers */
 1084	lwi	r2, r11, CC_R2
 1085	lwi	r1, r11, CC_R1
 1086
 1087	/* special purpose registers */
 1088	lwi	r12, r11, CC_FSR
 1089	mts	rfsr, r12
 1090	nop
 1091	lwi	r12, r11, CC_MSR
	/* restoring MSR last: this may flip interrupt-enable (MSR_IE) and
	 * the other mode bits to the incoming task's saved state */
 1092	mts	rmsr, r12
 1093	nop
 1094
	/* resume the incoming task at its saved link register + 8 */
 1095	rtsd	r15, 8
 1096	nop		/* branch delay slot */
1097
1098ENTRY(_reset)
	/* Soft reset: transfer control back to the FS-BOOT bootloader,
	 * which is entered at physical address 0x70 (the same address the
	 * reset vector in .init.ivt branches to). */
 1099	brai	0x70; /* Jump back to FS-boot */
1100
1101ENTRY(_break)
	/* Break/NMI trap handler (installed in the vector table below):
	 * dump MSR and ESR into fixed scratch slots inside r0_ram as a
	 * post-mortem debugging aid, then stop making progress.
	 * NOTE(review): offsets 0x250/0x254 are magic numbers - confirm
	 * against the r0_ram layout before changing them. */
 1102	mfs	r5, rmsr
 1103	nop
 1104	swi	r5, r0, 0x250 + TOPHYS(r0_ram)	/* save MSR */
 1105	mfs	r5, resr
 1106	nop
 1107	swi	r5, r0, 0x254 + TOPHYS(r0_ram)	/* save ESR */
	/* bri is PC-relative, so this is presumably a branch-to-self
	 * (hang here forever) - TODO confirm intended semantics */
 1108	bri	0
1109
 1110	/* These are compiled and loaded into high memory, then
 1111	 * copied into place in mach_early_setup */
	/* Interrupt/exception vector table template. Each entry is an
	 * absolute branch to the (physical) handler address; .org fixes
	 * each vector's offset within the table. */
 1112	.section	.init.ivt, "ax"
 1113	.org	0x0
 1114	/* this is very important - here is the reset vector */
 1115	/* in current MMU branch you don't care what is here - it is
 1116	 * used from bootloader site - but this is correct for FS-BOOT */
 1117	brai	0x70
 1118	nop
 1119	brai	TOPHYS(_user_exception); /* syscall handler */
 1120	brai	TOPHYS(_interrupt);	/* Interrupt handler */
 1121	brai	TOPHYS(_break);		/* nmi trap handler */
 1122	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
 1123
	/* debug vector sits at a fixed offset past the base vectors */
 1124	.org	0x60
 1125	brai	TOPHYS(_debug_exception);	/* debug trap handler*/
1126
	/* Read-only data: the syscall dispatch table plus the stack
	 * unwinder's trap-decoding tables. */
1127.section .rodata,"a"
 1128#include "syscall_table.S"
 1129
	/* number of bytes in sys_call_table (defined by the include) */
 1130syscall_table_size=(.-sys_call_table)
 1131
ce3266c0
SM
	/* NUL-terminated labels displayed by the unwinder for each
	 * recognized trap-return address range (table below) */
 1132type_SYSCALL:
 1133	.ascii "SYSCALL\0"
 1134type_IRQ:
 1135	.ascii "IRQ\0"
 1136type_IRQ_PREEMPT:
 1137	.ascii "IRQ (PREEMPTED)\0"
 1138type_SYSCALL_PREEMPT:
 1139	.ascii " SYSCALL (PREEMPTED)\0"
	/* NOTE(review): the leading space inside the string above looks
	 * unintentional - verify against unwinder output before changing,
	 * since it is runtime-visible data. */
 1140
 1141	/*
 1142	 * Trap decoding for stack unwinder
 1143	 * Tuples are (start addr, end addr, string)
 1144	 * If return address lies on [start addr, end addr],
 1145	 * unwinder displays 'string'
 1146	 */
 1147
 1148	.align 4
 1149.global microblaze_trap_handlers
 1150microblaze_trap_handlers:
 1151	/* Exact matches come first */
 1152	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
 1153	.word ret_from_irq ; .word ret_from_irq  ; .word type_IRQ
 1154	/* Fuzzy matches go here */
 1155	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
 1156	.word ret_from_trap; .word TRAP_return   ; .word type_SYSCALL_PREEMPT
 1157	/* End of table */
 1158	.word 0		   ; .word 0		 ; .word 0
This page took 0.134917 seconds and 5 git commands to generate.