microblaze: Simplify entry.S - save/restore r3/r4 - ret_from_trap
arch/microblaze/kernel/entry.S
/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

/* The size of a state save frame. */
#define STATE_SAVE_SIZE	(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 bytes: the space for args */
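
/* A state save frame on the kernel stack therefore looks like this (a
 * sketch derived from the two defines above; offsets from the updated r1):
 *
 *	r1 + 0			argument space (STATE_SAVE_ARG_SPACE bytes)
 *	r1 + PTO		struct pt_regs (PT_SIZE bytes)
 *	r1 + STATE_SAVE_SIZE	caller's frame
 */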

#define C_ENTRY(name)	.globl name; .align 4; name

/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary on MicroBlaze versions that
 * allow msr ops to write to BIP
 */
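/* Note: when CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR is set, the CPU was
 * configured with the optional msrset/msrclr instructions, which update MSR
 * bits in a single instruction; otherwise we fall back to a
 * read-modify-write mfs/andi-or-ori/mts sequence. */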
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r11, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r11, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r11, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r11, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r11, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r11, MSR_IE
	nop
	.endm

	.macro	set_ums
	msrset	r11, MSR_UMS
	nop
	msrclr	r11, MSR_VMS
	nop
	.endm

	.macro	set_vms
	msrclr	r11, MSR_UMS
	nop
	msrset	r11, MSR_VMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r11, MSR_VMS
	nop
	msrclr	r11, MSR_UMS
	nop
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr, r11
	nop
	.endm
#endif

/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
2: nop;

/* turn off virtual protected mode save and user mode save */
#define VM_OFF		\
	clear_vms_ums;		\
	rted	r0, TOPHYS(1f);	\
1: nop;
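
/* Background for the two macros above: the delayed return instructions
 * (rted/rtid/rtbd) copy MSR[UMS] into MSR[UM] and MSR[VMS] into MSR[VM] on
 * return, which is why these macros only touch the "save" bits and then
 * execute a dummy rted to the very next instruction to make them take
 * effect. */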

#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;

#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr, r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */

	.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice: it means we don't have to CLI before the
 * state save.
 */
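/* For reference, a minimal user-space sketch of this protocol (assuming the
 * standard MicroBlaze Linux ABI; the vector table at the end of this file
 * routes vector 0x8 to _user_exception):
 *
 *	addik	r12, r0, __NR_getpid	; syscall number in r12
 *	brki	r14, 0x8		; trap into the kernel
 *	; on return, the result is in r3
 */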
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 bytes after call */
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* Save r11 */

	lwi	r11, r0, TOPHYS(PER_CPU(KM)); /* See if already in kernel mode.*/
	beqi	r11, 1f;	/* Jump ahead if coming from user */
/* Kernel-mode state save. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	addi	r11, r0, 1;		/* Was in kernel-mode. */
	swi	r11, r1, PTO+PT_MODE;	/* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save. */
1:
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	swi	r0, r1, PTO+PT_MODE;		/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP. */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode. */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* where the trap should return: -8 is needed to adjust for rtsd r15, 8 */
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */

	# Step into virtual mode.
	set_vms;
	addik	r11, r0, 3f
	rtid	r11, 0
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;		/* convert num -> ptr */
	add	r12, r12, r12;
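	/* the two self-adds above are r12 *= 4: each sys_call_table entry
	 * is a single 32-bit function pointer */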

#ifdef DEBUG
	/* Trace syscalls and store the counts in r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return: -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error. */
5:
	addi	r3, r0, -ENOSYS;
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	or	r0, r0, r0


/* Entry point used to return from a syscall/trap */
/* We re-enable the BIP bit before the state restore */
C_ENTRY(ret_from_trap):
	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	/* See if returning to kernel mode; if so, skip resched &c. */
	bnei	r11, 2f;

	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending, skip handling */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	nop;

	/* Finally, return to user state. */
1:
	lwi	r3, r1, PTO + PT_R3;	/* restore syscall result */
	lwi	r4, r1, PTO + PT_R4;

	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE);	/* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway). */

C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD		/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	la	r7, r1, PTO		/* Arg 2: parent context */
	add	r8, r0, r0		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	add	r10, r0, r0;		/* Arg 5: (unused) */
	brid	do_fork			/* Do real work (tail-call) */
	nop;

/* This is the initial entry point for a new child thread, with an
   appropriate stack in place that makes it look like the child is in the
   middle of a syscall.  This function is actually `returned to' from
   switch_thread (copy_thread makes ret_from_fork the return address in
   each new thread's saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail;	/* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;

C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	la	r5, r1, PTO

C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO+PT_R1;	/* If so, use parent's stack ptr */
1:	la	r7, r1, PTO;		/* Arg 2: parent context */
	add	r8, r0, r0;		/* Arg 3: (unused) */
	add	r9, r0, r0;		/* Arg 4: (unused) */
	add	r10, r0, r0;		/* Arg 5: (unused) */
	brid	do_fork			/* Do real work (tail-call) */
	nop;

C_ENTRY(sys_execve):
	la	r8, r1, PTO;		/* add user context as 4th arg */
	brid	microblaze_execve;	/* Do real work (tail-call). */
	nop;

C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3;	/* save the r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	la	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;
	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri	ret_from_trap	/* fall through will not work here due to align */
	nop;

/*
 * HW EXCEPTION routine start
 */

#define SAVE_STATE	\
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
	set_bip;	/* equalize initial state for all possible entries */ \
	clear_eip;							\
	enable_irq;							\
	set_ee;								\
	/* See if already in kernel mode.*/				\
	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
	beqi	r11, 1f;	/* Jump ahead if coming from user */	\
	/* Kernel-mode state save. */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r11);							\
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */ \
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ \
	/* store return registers separately because			\
	 * this macro is used by other exceptions */			\
	swi	r3, r1, PTO + PT_R3;					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above */	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	addi	r11, r0, 1;		/* Was in kernel-mode. */	\
	swi	r11, r1, PTO+PT_MODE;					\
	brid	2f;							\
	nop;				/* Fill delay slot */		\
1:	/* User-mode state save. */					\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */ \
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ \
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */ \
	tophys(r1,r1);							\
									\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */ \
	/* store return registers separately because this macro	\
	 * is used by other exceptions */				\
	swi	r3, r1, PTO + PT_R3;					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above FIXME */	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	swi	r0, r1, PTO+PT_MODE;	/* Was in user-mode. */		\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1;	/* Store user SP. */		\
	addi	r11, r0, 1;						\
	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/ \
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	\
	/* Save away the syscall number. */				\
	swi	r0, r1, PTO+PT_R0;					\
	tovirt(r1,r1)

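/* Note: on a hardware exception the core leaves the return address in r17
 * (r14 is the interrupt link register, r16 the break link register), which
 * is why SAVE_STATE overwrites the PT_PC slot that SAVE_REGS filled from
 * r14 with r17 instead. */
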
C_ENTRY(full_exception_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	/* adjust exception address for privileged instruction
	 * so we can find where it occurred */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* FIXME this could be stored directly in the PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return: -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc - 8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, resr
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	mts	rfsr, r0;	/* Clear sticky fsr */
	nop
	la	r12, r0, full_exception
	set_vms;
	rtbd	r12, 0;
	nop;

/*
 * Unaligned data trap.
 *
 * An unaligned data access that lands on the last word of a 4k page is
 * handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice: it means we don't have to CLI before the
 * state save.
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers. */
	/* where the trap should return: -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
	la	r12, r0, _unaligned_data_exception
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such a
 * situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice: it means we don't have to CLI before the
 * state save.
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 *	void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction traps - which one occurred is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers. */
	/* where the trap should return: -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

C_ENTRY(page_fault_instr_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers. */
	/* where the trap should return: -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending, skip handling */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns.
	 * store return registers separately because this macro is used
	 * by other exceptions */
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	nop;

	/* Finally, return to user state. */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE);	/* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
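/* The handler below builds a pt_regs frame and then jumps to the generic C
 * IRQ entry point with r5 as its only argument - presumably
 * void do_IRQ(struct pt_regs *regs) on this architecture. */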
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual. */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: See if already in kernel mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(KM));
	beqi	r11, 1f;	/* MS: Jump ahead if coming from user */

/* Kernel-mode state save. */
	or	r11, r1, r0
	tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
	/* MS: Save original SP - position PT_R1 in the next stack frame */
	swi	r11, r1, (PT_R1 - PT_SIZE);
	/* MS: restore r11 because of saving in SAVE_REGS */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* save registers */
	/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
	/* MS: store return registers separately because
	 * this macro is used by other exceptions */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS
	/* MS: store mode */
	addi	r11, r0, 1;		/* MS: Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE;	/* MS: and save it */
	brid	2f;
	nop;				/* MS: Fill delay slot */

1:
/* User-mode state save. */
	/* MS: restore r11 -> FIXME move before SAVE_REG */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	swi	r3, r1, PTO+PT_R3;
	swi	r4, r1, PTO+PT_R4;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
	/* setup kernel mode to KM */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));

2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	swi	r0, r1, PTO + PT_R0;
	tovirt(r1,r1)
	la	r5, r1, PTO;
	set_vms;
	la	r11, r0, do_IRQ;
	la	r15, r0, irq_call;
irq_call:	rtbd	r11, 0;
	nop;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop;	/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* MS: get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
	/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0;	/* Arg 3: int in_syscall */
	la	r5, r1, PTO;	/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;	/* Arg 2: sigset_t *oldset */

	/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	r0, r0, PER_CPU(KM);	/* MS: Now officially in user state. */
	add	r11, r0, CURRENT_TASK;
	swi	r11, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO + PT_R3;	/* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;

	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore	/* if zero jump over */

preempt:
	/* interrupts are off, which is why we call preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt	/* if non-zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	lwi	r3, r1, PTO + PT_R3;	/* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return:	/* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop

/*
 * `Debug' trap
 * We enter dbtrap in "BIP" (breakpoint) mode.
 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
 * original dbtrap. However, we wait to save state first.
 */
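/* The handler notifies the task by calling send_sig(SIGTRAP, current, 0)
 * below - r5/r6/r7 are loaded to match that C calling convention. */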
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	enable_irq;
	lwi	r11, r0, TOPHYS(PER_CPU(KM)); /* See if already in kernel mode.*/
	beqi	r11, 1f;	/* Jump ahead if coming from user */
	/* Kernel-mode state save. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS;

	addi	r11, r0, 1;		/* Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE;
	brid	2f;
	nop;				/* Fill delay slot */
1:	/* User-mode state save. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS;

	swi	r0, r1, PTO+PT_MODE;	/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;	/* Store user SP. */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r0, r1, PTO+PT_R0;
	tovirt(r1,r1)

	addi	r5, r0, SIGTRAP		/* send the trap signal */
	add	r6, r0, CURRENT_TASK;	/* Arg 2: current task */
	addk	r7, r0, r0		/* 3rd param zero */

	set_vms;
	la	r11, r0, send_sig;
	la	r15, r0, dbtrap_call;
dbtrap_call:	rtbd	r11, 0;
	nop;

	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;

	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending, skip handling */

	/* Handle a signal return; Pending signals should be in r18. */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state.  Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns. */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;		/* Handle any signals */
	nop;

	/* Finally, return to user state. */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE);	/* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */

	tovirt(r1,r1);
6:
DBTRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

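/* Context switch. A rough C-level sketch of the calling convention used
 * below (the prototype is an assumption inferred from the TI_* offsets):
 *
 *	struct task_struct *_switch_to(struct thread_info *prev,
 *				       struct thread_info *next);
 *
 * r5 = prev thread_info, r6 = next thread_info; returns the previous task
 * (CURRENT_TASK is copied into r3 before it is updated). */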
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp registers */
	/* get start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on the stack when we jump to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	nop
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31 (current): pointer to the task which will run next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get the new process' cpu context and restore it */
	/* point r11 at the start of the next task's context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	rtsd	r15, 8
	nop

ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */

ENTRY(_break)
	mfs	r5, rmsr
	nop
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
	bri	0

/* These are compiled and loaded into high memory, then
 * copied into place in mach_early_setup */
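/* Each brai below assembles to an imm/brai pair (8 bytes) for the 32-bit
 * TOPHYS targets, matching the 8-byte spacing of the MicroBlaze hardware
 * vectors: 0x00 reset, 0x08 user exception (syscalls), 0x10 interrupt,
 * 0x18 break, 0x20 HW exception; the debug trap handler is placed at 0x60. */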
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in the current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception);	/* syscall handler */
	brai	TOPHYS(_interrupt);		/* Interrupt handler */
	brai	TOPHYS(_break);			/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler */

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)