microblaze: Fix _interrupt function
[deliverable/linux.git] / arch / microblaze / kernel / entry.S
1 /*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18 #include <linux/sys.h>
19 #include <linux/linkage.h>
20
21 #include <asm/entry.h>
22 #include <asm/current.h>
23 #include <asm/processor.h>
24 #include <asm/exceptions.h>
25 #include <asm/asm-offsets.h>
26 #include <asm/thread_info.h>
27
28 #include <asm/page.h>
29 #include <asm/unistd.h>
30
31 #include <linux/errno.h>
32 #include <asm/signal.h>
33
34 #undef DEBUG
35
36 /* The size of a state save frame. */
37 #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
38
39 /* The offset of the struct pt_regs in a `state save frame' on the stack. */
40 #define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
41
42 #define C_ENTRY(name) .globl name; .align 4; name
43
44 /*
45 * Various ways of setting and clearing BIP in flags reg.
46 * This is mucky, but necessary using microblaze version that
47 * allows msr ops to write to BIP
48 */
49 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
50 .macro clear_bip
51 msrclr r0, MSR_BIP
52 nop
53 .endm
54
55 .macro set_bip
56 msrset r0, MSR_BIP
57 nop
58 .endm
59
60 .macro clear_eip
61 msrclr r0, MSR_EIP
62 nop
63 .endm
64
65 .macro set_ee
66 msrset r0, MSR_EE
67 nop
68 .endm
69
70 .macro disable_irq
71 msrclr r0, MSR_IE
72 nop
73 .endm
74
75 .macro enable_irq
76 msrset r0, MSR_IE
77 nop
78 .endm
79
80 .macro set_ums
81 msrset r0, MSR_UMS
82 nop
83 msrclr r0, MSR_VMS
84 nop
85 .endm
86
87 .macro set_vms
88 msrclr r0, MSR_UMS
89 nop
90 msrset r0, MSR_VMS
91 nop
92 .endm
93
94 .macro clear_ums
95 msrclr r0, MSR_UMS
96 nop
97 .endm
98
99 .macro clear_vms_ums
100 msrclr r0, MSR_VMS | MSR_UMS
101 nop
102 .endm
103 #else
104 .macro clear_bip
105 mfs r11, rmsr
106 nop
107 andi r11, r11, ~MSR_BIP
108 mts rmsr, r11
109 nop
110 .endm
111
112 .macro set_bip
113 mfs r11, rmsr
114 nop
115 ori r11, r11, MSR_BIP
116 mts rmsr, r11
117 nop
118 .endm
119
120 .macro clear_eip
121 mfs r11, rmsr
122 nop
123 andi r11, r11, ~MSR_EIP
124 mts rmsr, r11
125 nop
126 .endm
127
128 .macro set_ee
129 mfs r11, rmsr
130 nop
131 ori r11, r11, MSR_EE
132 mts rmsr, r11
133 nop
134 .endm
135
136 .macro disable_irq
137 mfs r11, rmsr
138 nop
139 andi r11, r11, ~MSR_IE
140 mts rmsr, r11
141 nop
142 .endm
143
144 .macro enable_irq
145 mfs r11, rmsr
146 nop
147 ori r11, r11, MSR_IE
148 mts rmsr, r11
149 nop
150 .endm
151
152 .macro set_ums
153 mfs r11, rmsr
154 nop
155 ori r11, r11, MSR_VMS
156 andni r11, r11, MSR_UMS
157 mts rmsr, r11
158 nop
159 .endm
160
161 .macro set_vms
162 mfs r11, rmsr
163 nop
164 ori r11, r11, MSR_VMS
165 andni r11, r11, MSR_UMS
166 mts rmsr, r11
167 nop
168 .endm
169
170 .macro clear_ums
171 mfs r11, rmsr
172 nop
173 andni r11, r11, MSR_UMS
174 mts rmsr,r11
175 nop
176 .endm
177
178 .macro clear_vms_ums
179 mfs r11, rmsr
180 nop
181 andni r11, r11, (MSR_VMS|MSR_UMS)
182 mts rmsr,r11
183 nop
184 .endm
185 #endif
186
187 /* Define how to call high-level functions. With MMU, virtual mode must be
188 * enabled when calling the high-level function. Clobbers R11.
189 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
190 */
191
192 /* turn on virtual protected mode save */
193 #define VM_ON \
194 set_ums; \
195 rted r0, 2f; \
196 nop; \
197 2:
198
199 /* turn off virtual protected mode save and user mode save*/
200 #define VM_OFF \
201 clear_vms_ums; \
202 rted r0, TOPHYS(1f); \
203 nop; \
204 1:
205
206 #define SAVE_REGS \
207 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
208 swi r3, r1, PTO+PT_R3; \
209 swi r4, r1, PTO+PT_R4; \
210 swi r5, r1, PTO+PT_R5; \
211 swi r6, r1, PTO+PT_R6; \
212 swi r7, r1, PTO+PT_R7; \
213 swi r8, r1, PTO+PT_R8; \
214 swi r9, r1, PTO+PT_R9; \
215 swi r10, r1, PTO+PT_R10; \
216 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
217 swi r12, r1, PTO+PT_R12; \
218 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
219 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
220 swi r15, r1, PTO+PT_R15; /* Save LP */ \
221 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
222 swi r19, r1, PTO+PT_R19; \
223 swi r20, r1, PTO+PT_R20; \
224 swi r21, r1, PTO+PT_R21; \
225 swi r22, r1, PTO+PT_R22; \
226 swi r23, r1, PTO+PT_R23; \
227 swi r24, r1, PTO+PT_R24; \
228 swi r25, r1, PTO+PT_R25; \
229 swi r26, r1, PTO+PT_R26; \
230 swi r27, r1, PTO+PT_R27; \
231 swi r28, r1, PTO+PT_R28; \
232 swi r29, r1, PTO+PT_R29; \
233 swi r30, r1, PTO+PT_R30; \
234 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
235 mfs r11, rmsr; /* save MSR */ \
236 nop; \
237 swi r11, r1, PTO+PT_MSR;
238
239 #define RESTORE_REGS \
240 lwi r11, r1, PTO+PT_MSR; \
241 mts rmsr , r11; \
242 nop; \
243 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
244 lwi r3, r1, PTO+PT_R3; \
245 lwi r4, r1, PTO+PT_R4; \
246 lwi r5, r1, PTO+PT_R5; \
247 lwi r6, r1, PTO+PT_R6; \
248 lwi r7, r1, PTO+PT_R7; \
249 lwi r8, r1, PTO+PT_R8; \
250 lwi r9, r1, PTO+PT_R9; \
251 lwi r10, r1, PTO+PT_R10; \
252 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
253 lwi r12, r1, PTO+PT_R12; \
254 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
255 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
256 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
257 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
258 lwi r19, r1, PTO+PT_R19; \
259 lwi r20, r1, PTO+PT_R20; \
260 lwi r21, r1, PTO+PT_R21; \
261 lwi r22, r1, PTO+PT_R22; \
262 lwi r23, r1, PTO+PT_R23; \
263 lwi r24, r1, PTO+PT_R24; \
264 lwi r25, r1, PTO+PT_R25; \
265 lwi r26, r1, PTO+PT_R26; \
266 lwi r27, r1, PTO+PT_R27; \
267 lwi r28, r1, PTO+PT_R28; \
268 lwi r29, r1, PTO+PT_R29; \
269 lwi r30, r1, PTO+PT_R30; \
270 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
271
272 #define SAVE_STATE \
273 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
274 /* See if already in kernel mode.*/ \
275 mfs r1, rmsr; \
276 nop; \
277 andi r1, r1, MSR_UMS; \
278 bnei r1, 1f; \
279 /* Kernel-mode state save. */ \
280 /* Reload kernel stack-ptr. */ \
281 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
282 /* FIXME: I can add these two lines to one */ \
283 /* tophys(r1,r1); */ \
284 /* addik r1, r1, -STATE_SAVE_SIZE; */ \
285 addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
286 SAVE_REGS \
287 brid 2f; \
288 swi r1, r1, PTO+PT_MODE; \
289 1: /* User-mode state save. */ \
290 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
291 tophys(r1,r1); \
292 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
293 /* MS these three instructions can be added to one */ \
294 /* addik r1, r1, THREAD_SIZE; */ \
295 /* tophys(r1,r1); */ \
296 /* addik r1, r1, -STATE_SAVE_SIZE; */ \
297 addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
298 SAVE_REGS \
299 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
300 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
301 swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \
302 /* MS: I am clearing UMS even in case when I come from kernel space */ \
303 clear_ums; \
304 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
305
306 .text
307
308 /*
309 * User trap.
310 *
311 * System calls are handled here.
312 *
313 * Syscall protocol:
314 * Syscall number in r12, args in r5-r10
315 * Return value in r3
316 *
317 * Trap entered via brki instruction, so BIP bit is set, and interrupts
318 * are masked. This is nice, means we don't have to CLI before state save
319 */
320 C_ENTRY(_user_exception):
321 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
322 addi r14, r14, 4 /* return address is 4 byte after call */
323
324 mfs r1, rmsr
325 nop
326 andi r1, r1, MSR_UMS
327 bnei r1, 1f
328
329 /* Kernel-mode state save - kernel execve */
330 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
331 tophys(r1,r1);
332
333 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
334 SAVE_REGS
335
336 swi r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
337 brid 2f;
338 nop; /* Fill delay slot */
339
340 /* User-mode state save. */
341 1:
342 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
343 tophys(r1,r1);
344 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
345 /* calculate kernel stack pointer from task struct 8k */
346 addik r1, r1, THREAD_SIZE;
347 tophys(r1,r1);
348
349 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
350 SAVE_REGS
351
352 swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
353 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
354 swi r11, r1, PTO+PT_R1; /* Store user SP. */
355 clear_ums;
356 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
357 /* Save away the syscall number. */
358 swi r12, r1, PTO+PT_R0;
359 tovirt(r1,r1)
360
361 /* where the trap should return need -8 to adjust for rtsd r15, 8*/
362 /* Jump to the appropriate function for the system call number in r12
363 * (r12 is not preserved), or return an error if r12 is not valid. The LP
364 * register should point to the location where
365 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
366
367 /* Step into virtual mode */
368 rtbd r0, 3f
369 nop
370 3:
371 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
372 lwi r11, r11, TI_FLAGS /* get flags in thread info */
373 andi r11, r11, _TIF_WORK_SYSCALL_MASK
374 beqi r11, 4f
375
376 addik r3, r0, -ENOSYS
377 swi r3, r1, PTO + PT_R3
378 brlid r15, do_syscall_trace_enter
379 addik r5, r1, PTO + PT_R0
380
381 # do_syscall_trace_enter returns the new syscall nr.
382 addk r12, r0, r3
383 lwi r5, r1, PTO+PT_R5;
384 lwi r6, r1, PTO+PT_R6;
385 lwi r7, r1, PTO+PT_R7;
386 lwi r8, r1, PTO+PT_R8;
387 lwi r9, r1, PTO+PT_R9;
388 lwi r10, r1, PTO+PT_R10;
389 4:
390 /* Jump to the appropriate function for the system call number in r12
391 * (r12 is not preserved), or return an error if r12 is not valid.
392 * The LP register should point to the location where the called function
393 * should return. [note that MAKE_SYS_CALL uses label 1] */
394 /* See if the system call number is valid */
395 addi r11, r12, -__NR_syscalls;
396 bgei r11,5f;
397 /* Figure out which function to use for this system call. */
398 /* Note Microblaze barrel shift is optional, so don't rely on it */
399 add r12, r12, r12; /* convert num -> ptr */
400 add r12, r12, r12;
401
402 #ifdef DEBUG
403 /* Trac syscalls and stored them to r0_ram */
404 lwi r3, r12, 0x400 + r0_ram
405 addi r3, r3, 1
406 swi r3, r12, 0x400 + r0_ram
407 #endif
408
409 # Find and jump into the syscall handler.
410 lwi r12, r12, sys_call_table
411 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
412 addi r15, r0, ret_from_trap-8
413 bra r12
414
415 /* The syscall number is invalid, return an error. */
416 5:
417 rtsd r15, 8; /* looks like a normal subroutine return */
418 addi r3, r0, -ENOSYS;
419
420 /* Entry point used to return from a syscall/trap */
421 /* We re-enable BIP bit before state restore */
422 C_ENTRY(ret_from_trap):
423 /* Common return path after a syscall/trap. r3/r4 hold the return value. */
423 swi r3, r1, PTO + PT_R3
424 swi r4, r1, PTO + PT_R4
425
426 lwi r11, r1, PTO + PT_MODE;
427 /* See if returning to kernel mode, if so, skip resched &c. */
428 bnei r11, 2f;
429 /* We're returning to user mode, so check for various conditions that
430 * trigger rescheduling. */
431 /* FIXME: Restructure all these flag checks. */
432 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
433 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
434 andi r11, r11, _TIF_WORK_SYSCALL_MASK
435 beqi r11, 1f /* no syscall-trace work pending, skip */
436
437 brlid r15, do_syscall_trace_leave
438 addik r5, r1, PTO + PT_R0 /* delay slot: arg = saved regs base */
439 1:
440 /* We're returning to user mode, so check for various conditions that
441 * trigger rescheduling. */
442 /* get thread info from current task */
443 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
444 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
445 andi r11, r11, _TIF_NEED_RESCHED;
446 beqi r11, 5f; /* no resched needed, go check signals */
447
448 bralid r15, schedule; /* Call scheduler */
449 nop; /* delay slot */
450
451 /* Maybe handle a signal */
452 5: /* get thread info from current task*/
453 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
454 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
455 andi r11, r11, _TIF_SIGPENDING;
456 beqi r11, 1f; /* no signals pending, skip straight to state restore */
457
458 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
459 addi r7, r0, 1; /* Arg 3: int in_syscall */
460 bralid r15, do_signal; /* Handle any signals */
461 add r6, r0, r0; /* Arg 2: sigset_t *oldset (delay slot) */
462
463 /* Finally, return to user state. */
464 1: set_bip; /* Ints masked for state restore */
465 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
466 VM_OFF;
467 tophys(r1,r1);
468 RESTORE_REGS;
469 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
470 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
471 bri 6f;
472
473 /* Return to kernel state. */
474 2: set_bip; /* Ints masked for state restore */
475 VM_OFF;
476 tophys(r1,r1);
477 RESTORE_REGS;
478 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
479 tovirt(r1,r1);
480 6:
481 TRAP_return: /* Make global symbol for debugging */
482 rtbd r14, 0; /* Instructions to return from an IRQ */
483 nop;
484
485
486 /* These syscalls need access to the struct pt_regs on the stack, so we
487 implement them in assembly (they're basically all wrappers anyway). */
488
489 C_ENTRY(sys_fork_wrapper):
489 /* Build do_fork() arguments from the saved trap frame and tail-call it. */
490 addi r5, r0, SIGCHLD /* Arg 0: flags */
491 lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
492 addik r7, r1, PTO /* Arg 2: parent context (struct pt_regs *) */
493 add r8, r0, r0 /* Arg 3: (unused) -- was "add r8. r0, r0", period typo */
494 add r9, r0, r0; /* Arg 4: (unused) */
495 brid do_fork /* Do real work (tail-call) */
496 add r10, r0, r0; /* Arg 5: (unused), set in delay slot */
497
498 /* This is the initial entry point for a new child thread, with an appropriate
499 stack in place that makes it look as if the child is in the middle of a
500 syscall. This function is actually `returned to' from switch_thread
501 (copy_thread makes ret_from_fork the return address in each new thread's
502 saved context). */
503 C_ENTRY(ret_from_fork):
503 /* First code a new child runs; finish the scheduler handoff, then do a
503 * normal trap return with a zero return value (child side of fork). */
504 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
505 add r3, r5, r0; /* switch_thread returns the prev task */
506 /* ( in the delay slot ) */
507 brid ret_from_trap; /* Do normal trap return */
508 add r3, r0, r0; /* Child's fork call should return 0. */
509
510 C_ENTRY(sys_vfork):
510 /* Tail-call the C implementation with pt_regs as its argument. */
511 brid microblaze_vfork /* Do real work (tail-call) */
512 addik r5, r1, PTO /* delay slot: Arg 1 = struct pt_regs * */
513
514 C_ENTRY(sys_clone):
514 /* Like fork, but child SP comes from the caller (arg 1, in r6). */
515 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
516 lwi r6, r1, PTO + PT_R1; /* If so, use parent's stack ptr */
517 1: addik r7, r1, PTO; /* Arg 2: parent context */
518 add r8, r0, r0; /* Arg 3: (unused) */
519 add r9, r0, r0; /* Arg 4: (unused) */
520 brid do_fork /* Do real work (tail-call) */
521 add r10, r0, r0; /* Arg 5: (unused), set in delay slot */
522
523 C_ENTRY(sys_execve):
523 /* Tail-call the C implementation, passing pt_regs as the extra arg. */
524 brid microblaze_execve; /* Do real work (tail-call).*/
525 addik r8, r1, PTO; /* add user context as 4th arg */
526
527 C_ENTRY(sys_rt_sigreturn_wrapper):
527 /* sys_rt_sigreturn needs pt_regs; preserve r3/r4 around the call since
527 * they are the syscall return-value registers. */
528 swi r3, r1, PTO+PT_R3; /* save r3, r4 registers into pt_regs */
529 swi r4, r1, PTO+PT_R4;
530 brlid r15, sys_rt_sigreturn /* Do real work */
531 addik r5, r1, PTO; /* add user context as 1st arg */
532 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
533 lwi r4, r1, PTO+PT_R4;
534 bri ret_from_trap /* fall through will not work here due to align */
535 nop;
536
537 /*
538 * HW EXCEPTION routine start
539 */
540 C_ENTRY(full_exception_trap):
541 /* adjust exception address for privileged instruction
542 * for finding where is it */
543 addik r17, r17, -4
544 SAVE_STATE /* Save registers */
545 /* PC, before IRQ/trap - this is one instruction above */
546 swi r17, r1, PTO+PT_PC;
547 tovirt(r1,r1)
548 /* FIXME this can be store directly in PT_ESR reg.
549 * I tested it but there is a fault */
550 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
551 addik r15, r0, ret_from_exc - 8
552 mfs r6, resr
553 nop
554 mfs r7, rfsr; /* save FSR */
555 nop
556 mts rfsr, r0; /* Clear sticky fsr */
557 nop
558 rted r0, full_exception
559 addik r5, r1, PTO /* parameter struct pt_regs * regs */
560
561 /*
562 * Unaligned data trap.
563 *
564 * Unaligned data trap last on 4k page is handled here.
565 *
566 * Trap entered via exception, so EE bit is set, and interrupts
567 * are masked. This is nice, means we don't have to CLI before state save
568 *
569 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
570 */
571 C_ENTRY(unaligned_data_trap):
572 /* MS: I have to save r11 value and then restore it because
573 * set_bit, clear_eip, set_ee use r11 as temp register if MSR
574 * instructions are not used. We don't need to do if MSR instructions
575 * are used and they use r0 instead of r11.
576 * I am using ENTRY_SP which should be primary used only for stack
577 * pointer saving. */
578 swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
579 set_bip; /* equalize initial state for all possible entries */
580 clear_eip;
581 set_ee;
582 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
583 SAVE_STATE /* Save registers.*/
584 /* PC, before IRQ/trap - this is one instruction above */
585 swi r17, r1, PTO+PT_PC;
586 tovirt(r1,r1)
587 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
588 addik r15, r0, ret_from_exc-8
589 mfs r3, resr /* ESR */
590 nop
591 mfs r4, rear /* EAR */
592 nop
593 rtbd r0, _unaligned_data_exception
594 addik r7, r1, PTO /* parameter struct pt_regs * regs */
595
596 /*
597 * Page fault traps.
598 *
599 * If the real exception handler (from hw_exception_handler.S) didn't find
600 * the mapping for the process, then we're thrown here to handle such situation.
601 *
602 * Trap entered via exceptions, so EE bit is set, and interrupts
603 * are masked. This is nice, means we don't have to CLI before state save
604 *
605 * Build a standard exception frame for TLB Access errors. All TLB exceptions
606 * will bail out to this point if they can't resolve the lightweight TLB fault.
607 *
608 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
609 * void do_page_fault(struct pt_regs *regs,
610 * unsigned long address,
611 * unsigned long error_code)
612 */
613 /* data and instruction trap - which one it is gets resolved in fault.c */
614 C_ENTRY(page_fault_data_trap):
615 SAVE_STATE /* Save registers.*/
616 /* PC, before IRQ/trap - this is one instruction above */
617 swi r17, r1, PTO+PT_PC;
618 tovirt(r1,r1)
619 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
620 addik r15, r0, ret_from_exc-8
621 mfs r6, rear /* parameter unsigned long address */
622 nop
623 mfs r7, resr /* parameter unsigned long error_code */
624 nop
625 rted r0, do_page_fault
626 addik r5, r1, PTO /* parameter struct pt_regs * regs */
627
628 C_ENTRY(page_fault_instr_trap):
629 SAVE_STATE /* Save registers.*/
630 /* PC, before IRQ/trap - this is one instruction above */
631 swi r17, r1, PTO+PT_PC;
632 tovirt(r1,r1)
633 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
634 addik r15, r0, ret_from_exc-8
635 mfs r6, rear /* parameter unsigned long address */
636 nop
637 ori r7, r0, 0 /* parameter unsigned long error_code */
638 rted r0, do_page_fault
639 addik r5, r1, PTO /* parameter struct pt_regs * regs */
640
641 /* Entry point used to return from an exception. */
642 C_ENTRY(ret_from_exc):
642 /* Common return path for HW exceptions (PT_MODE selects kernel/user). */
643 lwi r11, r1, PTO + PT_MODE;
644 bnei r11, 2f; /* See if returning to kernel mode, */
645 /* ... if so, skip resched &c. */
646
647 /* We're returning to user mode, so check for various conditions that
648 trigger rescheduling. */
649 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
650 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
651 andi r11, r11, _TIF_NEED_RESCHED;
652 beqi r11, 5f; /* no resched needed, go check signals */
653
654 /* Call the scheduler before returning from a syscall/trap. */
655 bralid r15, schedule; /* Call scheduler */
656 nop; /* delay slot */
657
658 /* Maybe handle a signal */
659 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
660 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
661 andi r11, r11, _TIF_SIGPENDING;
662 beqi r11, 1f; /* no signals pending, skip straight to state restore */
663
664 /*
665 * Handle a signal return; Pending signals should be in r18.
666 *
667 * Not all registers are saved by the normal trap/interrupt entry
668 * points (for instance, call-saved registers (because the normal
669 * C-compiler calling sequence in the kernel makes sure they're
670 * preserved), and call-clobbered registers in the case of
671 * traps), but signal handlers may want to examine or change the
672 * complete register state. Here we save anything not saved by
673 * the normal entry sequence, so that it may be safely restored
674 * (in a possibly modified form) after do_signal returns. */
675 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
676 addi r7, r0, 0; /* Arg 3: int in_syscall */
677 bralid r15, do_signal; /* Handle any signals */
678 add r6, r0, r0; /* Arg 2: sigset_t *oldset (delay slot) */
679
680 /* Finally, return to user state. */
681 1: set_bip; /* Ints masked for state restore */
682 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
683 VM_OFF;
684 tophys(r1,r1);
685
686 RESTORE_REGS;
687 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
688
689 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
690 bri 6f;
691 /* Return to kernel state. */
692 2: set_bip; /* Ints masked for state restore */
693 VM_OFF;
694 tophys(r1,r1);
695 RESTORE_REGS;
696 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
697
698 tovirt(r1,r1);
699 6:
700 EXC_return: /* Make global symbol for debugging */
701 rtbd r14, 0; /* Instructions to return from an IRQ */
702 nop;
703
704 /*
705 * HW EXCEPTION routine end
706 */
707
708 /*
709 * Hardware maskable interrupts.
710 *
711 * The stack-pointer (r1) should have already been saved to the memory
712 * location PER_CPU(ENTRY_SP).
713 */
714 C_ENTRY(_interrupt):
715 /* MS: we are in physical address */
716 /* Save registers, switch to proper stack, convert SP to virtual.*/
717 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
718 /* MS: See if already in kernel mode. */
719 mfs r1, rmsr
720 nop
721 andi r1, r1, MSR_UMS
722 bnei r1, 1f /* UMS set => we came from user mode */
723
724 /* Kernel-mode state save. */
725 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
726 tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
727 /* save registers */
728 /* MS: Make room on the stack -> activation record */
729 addik r1, r1, -STATE_SAVE_SIZE;
730 SAVE_REGS
731 swi r1, r1, PTO + PT_MODE; /* PT_MODE: 0 = user mode, non-zero (SP) = kernel mode */
732 brid 2f;
733 nop; /* MS: Fill delay slot */
734
735 1:
736 /* User-mode state save. */
737 /* MS: get the saved current */
738 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
739 tophys(r1,r1);
740 lwi r1, r1, TS_THREAD_INFO;
741 addik r1, r1, THREAD_SIZE; /* kernel stack top for this task */
742 tophys(r1,r1);
743 /* save registers */
744 addik r1, r1, -STATE_SAVE_SIZE;
745 SAVE_REGS
746 /* calculate mode */
747 swi r0, r1, PTO + PT_MODE; /* zero => was in user mode */
748 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
749 swi r11, r1, PTO+PT_R1; /* store user SP in frame */
750 clear_ums;
751 2:
752 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
753 tovirt(r1,r1)
754 addik r15, r0, irq_call; /* return addr = irq_call, so do_IRQ falls back here */
755 irq_call:rtbd r0, do_IRQ;
756 addik r5, r1, PTO; /* delay slot: Arg 1 = struct pt_regs * */
757
758 /* MS: we are in virtual mode */
759 ret_from_irq:
760 lwi r11, r1, PTO + PT_MODE;
761 bnei r11, 2f; /* non-zero => returning to kernel mode */
762
763 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
764 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
765 andi r11, r11, _TIF_NEED_RESCHED;
766 beqi r11, 5f
767 bralid r15, schedule;
768 nop; /* delay slot */
769
770 /* Maybe handle a signal */
771 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
772 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
773 andi r11, r11, _TIF_SIGPENDING;
774 beqid r11, no_intr_resched /* no signals pending, go restore state */
775 /* Handle a signal return; Pending signals should be in r18. */
776 addi r7, r0, 0; /* Arg 3: int in_syscall */
777 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
778 bralid r15, do_signal; /* Handle any signals */
779 add r6, r0, r0; /* Arg 2: sigset_t *oldset (delay slot) */
780
781 /* Finally, return to user state. */
782 no_intr_resched:
783 /* Disable interrupts, we are now committed to the state restore */
784 disable_irq
785 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
786 VM_OFF;
787 tophys(r1,r1);
788 RESTORE_REGS
789 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
790 lwi r1, r1, PT_R1 - PT_SIZE; /* restore user stack pointer */
791 bri 6f;
792 /* MS: Return to kernel state. */
793 2:
794 #ifdef CONFIG_PREEMPT
795 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
796 /* MS: get preempt_count from thread info */
797 lwi r5, r11, TI_PREEMPT_COUNT;
798 bgti r5, restore; /* preemption disabled, just restore */
799
800 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
801 andi r5, r5, _TIF_NEED_RESCHED;
802 beqi r5, restore /* if zero jump over */
803
804 preempt:
805 /* interrupts are off, that's why I am calling preempt_schedule_irq */
806 bralid r15, preempt_schedule_irq
807 nop
808 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
809 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
810 andi r5, r5, _TIF_NEED_RESCHED;
811 bnei r5, preempt /* if non zero jump to resched */
812 restore:
813 #endif
814 VM_OFF /* MS: turn off MMU */
815 tophys(r1,r1)
816 RESTORE_REGS
817 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
818 tovirt(r1,r1);
819 6:
820 IRQ_return: /* MS: Make global symbol for debugging */
821 rtid r14, 0
822 nop
823
824 /*
825 * `Debug' trap
826 * We enter dbtrap in "BIP" (breakpoint) mode.
827 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
828 * original dbtrap.
829 * however, wait to save state first
830 */
831 C_ENTRY(_debug_exception):
832 /* BIP bit is set on entry, no interrupts can occur */
833 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
834
835 mfs r1, rmsr
836 nop
837 andi r1, r1, MSR_UMS
838 bnei r1, 1f
839 /* Kernel-mode state save. */
840 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
841 tophys(r1,r1);
842
843 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
844 SAVE_REGS;
845
846 swi r1, r1, PTO + PT_MODE;
847 brid 2f;
848 nop; /* Fill delay slot */
849 1: /* User-mode state save. */
850 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
851 tophys(r1,r1);
852 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
853 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
854 tophys(r1,r1);
855
856 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
857 SAVE_REGS;
858
859 swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
860 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
861 swi r11, r1, PTO+PT_R1; /* Store user SP. */
862 2:
863 tovirt(r1,r1)
864
865 set_vms;
866 addi r5, r0, SIGTRAP /* send the trap signal */
867 add r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */
868 addk r7, r0, r0 /* 3rd param zero */
869 dbtrap_call: rtbd r0, send_sig;
870 addik r15, r0, dbtrap_call;
871
872 set_bip; /* Ints masked for state restore*/
873 lwi r11, r1, PTO + PT_MODE;
874 bnei r11, 2f;
875
876 /* Get current task ptr into r11 */
877 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
878 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
879 andi r11, r11, _TIF_NEED_RESCHED;
880 beqi r11, 5f;
881
882 /* Call the scheduler before returning from a syscall/trap. */
883
884 bralid r15, schedule; /* Call scheduler */
885 nop; /* delay slot */
886 /* XXX Is PT_DTRACE handling needed here? */
887 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
888
889 /* Maybe handle a signal */
890 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
891 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
892 andi r11, r11, _TIF_SIGPENDING;
893 beqi r11, 1f; /* Signals to handle, handle them */
894
895 /* Handle a signal return; Pending signals should be in r18. */
896 /* Not all registers are saved by the normal trap/interrupt entry
897 points (for instance, call-saved registers (because the normal
898 C-compiler calling sequence in the kernel makes sure they're
899 preserved), and call-clobbered registers in the case of
900 traps), but signal handlers may want to examine or change the
901 complete register state. Here we save anything not saved by
902 the normal entry sequence, so that it may be safely restored
903 (in a possibly modified form) after do_signal returns. */
904
905 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
906 addi r7, r0, 0; /* Arg 3: int in_syscall */
907 bralid r15, do_signal; /* Handle any signals */
908 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
909
910
911 /* Finally, return to user state. */
912 1:
913 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
914 VM_OFF;
915 tophys(r1,r1);
916
917 RESTORE_REGS
918 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
919
920
921 lwi r1, r1, PT_R1 - PT_SIZE;
922 /* Restore user stack pointer. */
923 bri 6f;
924
925 /* Return to kernel state. */
926 2: VM_OFF;
927 tophys(r1,r1);
928 RESTORE_REGS
929 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
930
931 tovirt(r1,r1);
932 6:
933 DBTRAP_return: /* Make global symbol for debugging */
934 rtbd r14, 0; /* Instructions to return from an IRQ */
935 nop;
936
937
938
939 ENTRY(_switch_to)
940 /* prepare return value */
941 addk r3, r0, CURRENT_TASK
942
943 /* save registers in cpu_context */
944 /* use r11 and r12, volatile registers, as temp register */
945 /* give start of cpu_context for previous process */
946 addik r11, r5, TI_CPU_CONTEXT
947 swi r1, r11, CC_R1
948 swi r2, r11, CC_R2
949 /* skip volatile registers.
950 * they are saved on stack when we jumped to _switch_to() */
951 /* dedicated registers */
952 swi r13, r11, CC_R13
953 swi r14, r11, CC_R14
954 swi r15, r11, CC_R15
955 swi r16, r11, CC_R16
956 swi r17, r11, CC_R17
957 swi r18, r11, CC_R18
958 /* save non-volatile registers */
959 swi r19, r11, CC_R19
960 swi r20, r11, CC_R20
961 swi r21, r11, CC_R21
962 swi r22, r11, CC_R22
963 swi r23, r11, CC_R23
964 swi r24, r11, CC_R24
965 swi r25, r11, CC_R25
966 swi r26, r11, CC_R26
967 swi r27, r11, CC_R27
968 swi r28, r11, CC_R28
969 swi r29, r11, CC_R29
970 swi r30, r11, CC_R30
971 /* special purpose registers */
972 mfs r12, rmsr
973 nop
974 swi r12, r11, CC_MSR
975 mfs r12, rear
976 nop
977 swi r12, r11, CC_EAR
978 mfs r12, resr
979 nop
980 swi r12, r11, CC_ESR
981 mfs r12, rfsr
982 nop
983 swi r12, r11, CC_FSR
984
985 /* update r31, the current-give me pointer to task which will be next */
986 lwi CURRENT_TASK, r6, TI_TASK
987 /* stored it to current_save too */
988 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
989
990 /* get new process' cpu context and restore */
991 /* give me start where start context of next task */
992 addik r11, r6, TI_CPU_CONTEXT
993
994 /* non-volatile registers */
995 lwi r30, r11, CC_R30
996 lwi r29, r11, CC_R29
997 lwi r28, r11, CC_R28
998 lwi r27, r11, CC_R27
999 lwi r26, r11, CC_R26
1000 lwi r25, r11, CC_R25
1001 lwi r24, r11, CC_R24
1002 lwi r23, r11, CC_R23
1003 lwi r22, r11, CC_R22
1004 lwi r21, r11, CC_R21
1005 lwi r20, r11, CC_R20
1006 lwi r19, r11, CC_R19
1007 /* dedicated registers */
1008 lwi r18, r11, CC_R18
1009 lwi r17, r11, CC_R17
1010 lwi r16, r11, CC_R16
1011 lwi r15, r11, CC_R15
1012 lwi r14, r11, CC_R14
1013 lwi r13, r11, CC_R13
1014 /* skip volatile registers */
1015 lwi r2, r11, CC_R2
1016 lwi r1, r11, CC_R1
1017
1018 /* special purpose registers */
1019 lwi r12, r11, CC_FSR
1020 mts rfsr, r12
1021 nop
1022 lwi r12, r11, CC_MSR
1023 mts rmsr, r12
1024 nop
1025
1026 rtsd r15, 8
1027 nop
1028
1029 ENTRY(_reset)
1030 brai 0x70; /* Jump back to FS-boot */
1031
1032 ENTRY(_break)
1032 /* NMI/break handler: dump MSR and ESR to fixed scratch slots in r0_ram
1032 * for post-mortem inspection, then hang. */
1033 mfs r5, rmsr
1034 nop
1035 swi r5, r0, 0x250 + TOPHYS(r0_ram) /* saved MSR */
1036 mfs r5, resr
1037 nop
1038 swi r5, r0, 0x254 + TOPHYS(r0_ram) /* saved ESR */
1039 bri 0 /* spin here forever */
1040
1041 /* These are compiled and loaded into high memory, then
1042 * copied into place in mach_early_setup */
1043 .section .init.ivt, "ax"
1044 .org 0x0
1045 /* this is very important - here is the reset vector */
1046 /* in current MMU branch you don't care what is here - it is
1047 * used from bootloader site - but this is correct for FS-BOOT */
1048 brai 0x70
1049 nop
1050 brai TOPHYS(_user_exception); /* syscall handler */
1051 brai TOPHYS(_interrupt); /* Interrupt handler */
1052 brai TOPHYS(_break); /* nmi trap handler */
1053 brai TOPHYS(_hw_exception_handler); /* HW exception handler */
1054
1055 .org 0x60
1056 brai TOPHYS(_debug_exception); /* debug trap handler*/
1057
1058 .section .rodata,"a"
1059 #include "syscall_table.S"
1060
1061 syscall_table_size=(.-sys_call_table)
1062
1063 type_SYSCALL:
1064 .ascii "SYSCALL\0"
1065 type_IRQ:
1066 .ascii "IRQ\0"
1067 type_IRQ_PREEMPT:
1068 .ascii "IRQ (PREEMPTED)\0"
1069 type_SYSCALL_PREEMPT:
1070 .ascii " SYSCALL (PREEMPTED)\0"
1071
1072 /*
1073 * Trap decoding for stack unwinder
1074 * Tuples are (start addr, end addr, string)
1075 * If return address lies on [start addr, end addr],
1076 * unwinder displays 'string'
1077 */
1078
1079 .align 4
1080 .global microblaze_trap_handlers
1081 microblaze_trap_handlers:
1082 /* Exact matches come first */
1083 .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
1084 .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
1085 /* Fuzzy matches go here */
1086 .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
1087 .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
1088 /* End of table */
1089 .word 0 ; .word 0 ; .word 0
This page took 0.058391 seconds and 6 git commands to generate.