microblaze: Save and restore r3/r4 in SAVE/RESTORE_REGS macros
arch/microblaze/kernel/entry.S
/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

/* The size of a state save frame. */
#define STATE_SAVE_SIZE	(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 bytes of space for args */
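
/*
 * For orientation, a sketch of the state-save frame this file builds on
 * the kernel stack (layout inferred from the two defines above):
 *
 *	r1 ->		+---------------------------+  low addresses
 *			| STATE_SAVE_ARG_SPACE      |  scratch for C-call args
 *	r1 + PTO ->	+---------------------------+
 *			| struct pt_regs (PT_SIZE)  |  saved register state
 *			+---------------------------+  STATE_SAVE_SIZE in total
 */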

#define C_ENTRY(name)	.globl name; .align 4; name

/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary when the MicroBlaze version in use
 * allows msr instructions to write to BIP.
 */
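
/*
 * A hedged note on the msrset/msrclr semantics relied on below (from
 * the MicroBlaze ISA, not restated elsewhere in this file): both
 * instructions return the *previous* MSR value in rD while setting or
 * clearing the immediate bits, e.g.
 *
 *	msrclr	r11, MSR_IE	; r11 = old MSR, interrupts now off
 */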
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r11, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r11, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r11, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r11, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r11, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r11, MSR_IE
	nop
	.endm

	.macro	set_ums
	msrset	r11, MSR_UMS
	nop
	msrclr	r11, MSR_VMS
	nop
	.endm

	.macro	set_vms
	msrclr	r11, MSR_UMS
	nop
	msrset	r11, MSR_VMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r11, MSR_VMS
	nop
	msrclr	r11, MSR_UMS
	nop
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr, r11
	nop
	.endm
#endif

/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode (via the MSR save bits) */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop;		\
2:

/* turn off virtual protected mode and user mode (via the MSR save bits) */
#define VM_OFF			\
	clear_vms_ums;		\
	rted	r0, TOPHYS(1f);	\
	nop;			\
1:
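
/*
 * How VM_ON/VM_OFF take effect (a hedged note, inferred from the
 * MicroBlaze ISA): set_ums/clear_vms_ums only touch the UMS/VMS
 * *shadow* bits; the rted that follows performs an exception-style
 * return to the local label, and it is that return which copies
 * UMS->UM and VMS->VM, so the mode switch lands exactly at the label.
 */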

#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r3, r1, PTO+PT_R3;					\
	swi	r4, r1, PTO+PT_R4;					\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;

#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr, r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PTO+PT_R3;					\
	lwi	r4, r1, PTO+PT_R4;					\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */

.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is convenient: it means we don't have to disable
 * interrupts before the state save.
 */
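
/*
 * For illustration, a hedged sketch of how user space enters this
 * handler under the protocol above (the specific syscall name is an
 * assumption, not taken from this file):
 *
 *	addik	r12, r0, __NR_getpid	; syscall number in r12
 *	brki	r14, 0x08		; trap via the user vector at 0x8
 *	; on return, r3 holds the result
 */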
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 bytes after call */
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */

	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
/* Kernel-mode state save. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	addi	r11, r0, 1;		/* Was in kernel-mode. */
	swi	r11, r1, PTO+PT_MODE;	/* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save. */
1:
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r0, r1, PTO+PT_MODE;		/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP. */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode. */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* the trap return address needs -8 to adjust for 'rtsd r15, 8' */
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
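
/*
 * An illustration of the -8 convention used throughout this file:
 * C handlers return with "rtsd r15, 8", i.e. they jump to r15 + 8, so
 * pre-loading r15 with (ret_from_trap - 8) makes an ordinary
 * subroutine return land exactly on ret_from_trap.
 */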

	# Step into virtual mode.
	set_vms;
	addik	r11, r0, 3f
	rtid	r11, 0
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return. [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;		/* convert num -> ptr */
	add	r12, r12, r12;
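	/* (The two self-adds compute r12 *= 4, the byte offset of a
	 * 32-bit table entry, without needing the optional barrel
	 * shifter's "bslli r12, r12, 2".) */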

#ifdef DEBUG
	/* Trace syscalls: count each one in a table in r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* the trap return address needs -8 to adjust for 'rtsd r15, 8' */
	la	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error. */
5:
	addi	r3, r0, -ENOSYS;
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	or	r0, r0, r0


/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	set_bip;			/* Ints masked for state restore */
	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4

	lwi	r11, r1, PTO+PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c. */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;		/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals to handle, skip */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:
	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway). */

C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD		/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	la	r7, r1, PTO		/* Arg 2: parent context */
	add	r8, r0, r0		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	add	r10, r0, r0;		/* Arg 5: (unused) */
	brid	do_fork			/* Do real work (tail-call) */
	nop;
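
/* A hedged note on the mapping above and in sys_clone below: it matches
 * a do_fork() of the form do_fork(flags, stack_start, regs, stack_size,
 * parent_tidptr, child_tidptr), with r5..r10 carrying the arguments in
 * order. The exact prototype is an assumption based on kernels of this
 * era, not restated from this file. */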

/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall. This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;

C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	la	r5, r1, PTO

C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
1:	add	r10, r0, r9;		/* Arg 6: (child_tidptr) */
	add	r9, r0, r8;		/* Arg 5: (parent_tidptr) */
	add	r8, r0, r7;		/* Arg 4: (stack_size) */
	la	r7, r1, PTO;		/* Arg 3: pt_regs */
	brid	do_fork			/* Do real work (tail-call) */
	nop

C_ENTRY(sys_execve):
	la	r8, r1, PTO;		/* add user context as 4th arg */
	brid	microblaze_execve;	/* Do real work (tail-call).*/
	nop;

C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3;	/* save r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	la	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;
	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri	ret_from_trap /* fall through will not work here due to align */
	nop;

/*
 * HW EXCEPTION routine start
 */

#define SAVE_STATE	\
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
	set_bip; /* equalize initial state for all possible entries */	\
	clear_eip;							\
	enable_irq;							\
	set_ee;								\
	/* See if already in kernel mode.*/				\
	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
	beqi	r11, 1f;	/* Jump ahead if coming from user */	\
	/* Kernel-mode state save. */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r11);							\
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above */	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	addi	r11, r0, 1;		/* Was in kernel-mode. */	\
	swi	r11, r1, PTO+PT_MODE;					\
	brid	2f;							\
	nop;				/* Fill delay slot */		\
1:	/* User-mode state save. */					\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
	tophys(r1,r1);							\
									\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above FIXME*/	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	swi	r0, r1, PTO+PT_MODE;	/* Was in user-mode. */		\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1;	/* Store user SP. */		\
	addi	r11, r0, 1;						\
	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	\
	/* Save away the syscall number. */				\
	swi	r0, r1, PTO+PT_R0;					\
	tovirt(r1,r1)

C_ENTRY(full_exception_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	/* adjust exception address for privileged instruction
	 * so we can find where it occurred */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* FIXME: this could be stored directly in the PT_ESR reg;
	 * it was tested, but a fault occurred */
	/* the trap return address needs -8 to adjust for 'rtsd r15, 8' */
	la	r15, r0, ret_from_exc - 8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, resr
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	mts	rfsr, r0;		/* Clear sticky fsr */
	nop
	la	r12, r0, full_exception
	set_vms;
	rtbd	r12, 0;
	nop;

/*
 * Unaligned data trap.
 *
 * An unaligned data trap landing on the last word of a 4k page is
 * handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is convenient: it means we don't have to disable
 * interrupts before the state save.
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* the trap return address needs -8 to adjust for 'rtsd r15, 8' */
	la	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
	la	r12, r0, _unaligned_data_exception
	set_vms;
	rtbd	r12, 0;			/* interrupts enabled */
	nop;

/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a
 * situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is convenient: it means we don't have to disable
 * interrupts before the state save.
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 *	void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction traps - which one occurred is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* the trap return address needs -8 to adjust for 'rtsd r15, 8' */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;			/* interrupts enabled */
	nop;

C_ENTRY(page_fault_instr_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* the trap return address needs -8 to adjust for 'rtsd r15, 8' */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;			/* interrupts enabled */
	nop;

/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;		/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals to handle, skip */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state. Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: See if already in kernel mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(KM));
	beqi	r11, 1f;	/* MS: Jump ahead if coming from user */

/* Kernel-mode state save. */
	or	r11, r1, r0
	tophys(r1,r11); /* MS: r1 now holds the physical address of the stack */
	/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152 */
	swi	r11, r1, (PT_R1 - PT_SIZE);
	/* MS: restore r11 because of saving in SAVE_REGS */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* MS: store mode */
	addi	r11, r0, 1;		/* MS: Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE;	/* MS: and save it */
	brid	2f;
	nop;				/* MS: Fill delay slot */

1:
/* User-mode state save. */
	/* MS: restore r11 -> FIXME move before SAVE_REG */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
	/* setup kernel mode to KM */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));

2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	swi	r0, r1, PTO + PT_R0;
	tovirt(r1,r1)
	la	r5, r1, PTO;
	set_vms;
	la	r11, r0, do_IRQ;
	la	r15, r0, irq_call;
irq_call:rtbd	r11, 0;
	nop;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	r0, r0, PER_CPU(KM);	/* MS: Now officially in user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;

	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore		/* if zero jump over */

preempt:
	/* interrupts are off, that's why we call preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt		/* if non zero jump to resched */
restore:
#endif
	VM_OFF				/* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return:		/* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop

/*
 * `Debug' trap
 *  We enter dbtrap in "BIP" (breakpoint) mode.
 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
 *  original dbtrap.
 *  However, wait to save state first.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
	set_bip; /* equalize initial state for all possible entries */
	clear_eip;
	enable_irq;
	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
	/* Kernel-mode state save. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS;

	addi	r11, r0, 1;		/* Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE;
	brid	2f;
	nop;				/* Fill delay slot */
1:	/* User-mode state save. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS;

	swi	r0, r1, PTO+PT_MODE;	/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;	/* Store user SP. */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r0, r1, PTO+PT_R0;
	tovirt(r1,r1)

	addi	r5, r0, SIGTRAP		/* Arg 1: the trap signal */
	add	r6, r0, CURRENT_TASK;	/* Arg 2: current task ptr */
	addk	r7, r0, r0		/* Arg 3: zero */

	set_vms;
	la	r11, r0, send_sig;
	la	r15, r0, dbtrap_call;
dbtrap_call:	rtbd	r11, 0;
	nop;

	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;		/* Call scheduler */
	nop;				/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals to handle, skip */

/* Handle a signal return; Pending signals should be in r18. */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state. Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns. */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */


/* Finally, return to user state. */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	tovirt(r1,r1);
6:
DBTRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

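
/*
 * Context switch. A hedged summary of the contract, inferred from the
 * code below rather than from separate documentation: r5 = previous
 * task's thread_info, r6 = next task's thread_info; the previous task
 * pointer is returned in r3. Volatile registers are not saved here
 * because the caller already has them on its own stack.
 */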
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	nop
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31 (CURRENT_TASK) with the pointer to the next task */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it to CURRENT_SAVE too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* start of the next task's cpu_context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	rtsd	r15, 8
	nop

ENTRY(_reset)
	brai	0x70;		/* Jump back to FS-boot */

ENTRY(_break)
	mfs	r5, rmsr
	nop
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
	bri	0


	/* These vectors are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in the current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_break);		/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
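
/*
 * For reference, the hardware vector addresses these entries land on
 * (per the MicroBlaze architecture; the byte offsets are stated here
 * as a hedged aid, not taken from this file):
 *	0x00 reset, 0x08 user vector (syscall), 0x10 interrupt,
 *	0x18 break, 0x20 hardware exception, 0x60 debug.
 */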

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

/*
 * Trap decoding for stack unwinder
 * Tuples are (start addr, end addr, string)
 * If the return address lies within [start addr, end addr],
 * the unwinder displays 'string'
 */
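
/*
 * Example of how the table below is meant to be read (inferred from
 * the comment above): a frame whose return address equals
 * ret_from_trap matches the first, exact row, so the unwinder labels
 * that frame "SYSCALL".
 */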

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap   ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq    ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return     ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0            ; .word 0               ; .word 0