3fee82d6e9a21aaf6170c301340110b69b3ed0f2
[deliverable/linux.git] / arch / microblaze / kernel / entry.S
1 /*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18 #include <linux/sys.h>
19 #include <linux/linkage.h>
20
21 #include <asm/entry.h>
22 #include <asm/current.h>
23 #include <asm/processor.h>
24 #include <asm/exceptions.h>
25 #include <asm/asm-offsets.h>
26 #include <asm/thread_info.h>
27
28 #include <asm/page.h>
29 #include <asm/unistd.h>
30
31 #include <linux/errno.h>
32 #include <asm/signal.h>
33
34 #undef DEBUG
35
36 /* The size of a state save frame. */
37 #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
38
39 /* The offset of the struct pt_regs in a `state save frame' on the stack. */
40 #define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
41
42 #define C_ENTRY(name) .globl name; .align 4; name
43
44 /*
45 * Various ways of setting and clearing BIP in flags reg.
46 * This is mucky, but necessary using microblaze version that
47 * allows msr ops to write to BIP
48 */
49 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
50 .macro clear_bip
51 msrclr r11, MSR_BIP
52 nop
53 .endm
54
55 .macro set_bip
56 msrset r11, MSR_BIP
57 nop
58 .endm
59
60 .macro clear_eip
61 msrclr r11, MSR_EIP
62 nop
63 .endm
64
65 .macro set_ee
66 msrset r11, MSR_EE
67 nop
68 .endm
69
70 .macro disable_irq
71 msrclr r11, MSR_IE
72 nop
73 .endm
74
75 .macro enable_irq
76 msrset r11, MSR_IE
77 nop
78 .endm
79
80 .macro set_ums
81 msrset r11, MSR_UMS
82 nop
83 msrclr r11, MSR_VMS
84 nop
85 .endm
86
87 .macro set_vms
88 msrclr r11, MSR_UMS
89 nop
90 msrset r11, MSR_VMS
91 nop
92 .endm
93
94 .macro clear_vms_ums
95 msrclr r11, MSR_VMS | MSR_UMS
96 nop
97 .endm
98 #else
99 .macro clear_bip
100 mfs r11, rmsr
101 nop
102 andi r11, r11, ~MSR_BIP
103 mts rmsr, r11
104 nop
105 .endm
106
107 .macro set_bip
108 mfs r11, rmsr
109 nop
110 ori r11, r11, MSR_BIP
111 mts rmsr, r11
112 nop
113 .endm
114
115 .macro clear_eip
116 mfs r11, rmsr
117 nop
118 andi r11, r11, ~MSR_EIP
119 mts rmsr, r11
120 nop
121 .endm
122
123 .macro set_ee
124 mfs r11, rmsr
125 nop
126 ori r11, r11, MSR_EE
127 mts rmsr, r11
128 nop
129 .endm
130
131 .macro disable_irq
132 mfs r11, rmsr
133 nop
134 andi r11, r11, ~MSR_IE
135 mts rmsr, r11
136 nop
137 .endm
138
139 .macro enable_irq
140 mfs r11, rmsr
141 nop
142 ori r11, r11, MSR_IE
143 mts rmsr, r11
144 nop
145 .endm
146
147 .macro set_ums
148 mfs r11, rmsr
149 nop
150 ori r11, r11, MSR_VMS
151 andni r11, r11, MSR_UMS
152 mts rmsr, r11
153 nop
154 .endm
155
156 .macro set_vms
157 mfs r11, rmsr
158 nop
159 ori r11, r11, MSR_VMS
160 andni r11, r11, MSR_UMS
161 mts rmsr, r11
162 nop
163 .endm
164
165 .macro clear_vms_ums
166 mfs r11, rmsr
167 nop
168 andni r11, r11, (MSR_VMS|MSR_UMS)
169 mts rmsr,r11
170 nop
171 .endm
172 #endif
173
174 /* Define how to call high-level functions. With MMU, virtual mode must be
175 * enabled when calling the high-level function. Clobbers R11.
176 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
177 */
178
179 /* turn on virtual protected mode save */
180 #define VM_ON \
181 set_ums; \
182 rted r0, 2f; \
183 nop; \
184 2:
185
186 /* turn off virtual protected mode save and user mode save*/
187 #define VM_OFF \
188 clear_vms_ums; \
189 rted r0, TOPHYS(1f); \
190 nop; \
191 1:
192
193 #define SAVE_REGS \
194 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
195 swi r3, r1, PTO+PT_R3; \
196 swi r4, r1, PTO+PT_R4; \
197 swi r5, r1, PTO+PT_R5; \
198 swi r6, r1, PTO+PT_R6; \
199 swi r7, r1, PTO+PT_R7; \
200 swi r8, r1, PTO+PT_R8; \
201 swi r9, r1, PTO+PT_R9; \
202 swi r10, r1, PTO+PT_R10; \
203 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
204 swi r12, r1, PTO+PT_R12; \
205 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
206 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
207 swi r15, r1, PTO+PT_R15; /* Save LP */ \
208 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
209 swi r19, r1, PTO+PT_R19; \
210 swi r20, r1, PTO+PT_R20; \
211 swi r21, r1, PTO+PT_R21; \
212 swi r22, r1, PTO+PT_R22; \
213 swi r23, r1, PTO+PT_R23; \
214 swi r24, r1, PTO+PT_R24; \
215 swi r25, r1, PTO+PT_R25; \
216 swi r26, r1, PTO+PT_R26; \
217 swi r27, r1, PTO+PT_R27; \
218 swi r28, r1, PTO+PT_R28; \
219 swi r29, r1, PTO+PT_R29; \
220 swi r30, r1, PTO+PT_R30; \
221 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
222 mfs r11, rmsr; /* save MSR */ \
223 nop; \
224 swi r11, r1, PTO+PT_MSR;
225
226 #define RESTORE_REGS \
227 lwi r11, r1, PTO+PT_MSR; \
228 mts rmsr , r11; \
229 nop; \
230 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
231 lwi r3, r1, PTO+PT_R3; \
232 lwi r4, r1, PTO+PT_R4; \
233 lwi r5, r1, PTO+PT_R5; \
234 lwi r6, r1, PTO+PT_R6; \
235 lwi r7, r1, PTO+PT_R7; \
236 lwi r8, r1, PTO+PT_R8; \
237 lwi r9, r1, PTO+PT_R9; \
238 lwi r10, r1, PTO+PT_R10; \
239 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
240 lwi r12, r1, PTO+PT_R12; \
241 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
242 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
243 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
244 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
245 lwi r19, r1, PTO+PT_R19; \
246 lwi r20, r1, PTO+PT_R20; \
247 lwi r21, r1, PTO+PT_R21; \
248 lwi r22, r1, PTO+PT_R22; \
249 lwi r23, r1, PTO+PT_R23; \
250 lwi r24, r1, PTO+PT_R24; \
251 lwi r25, r1, PTO+PT_R25; \
252 lwi r26, r1, PTO+PT_R26; \
253 lwi r27, r1, PTO+PT_R27; \
254 lwi r28, r1, PTO+PT_R28; \
255 lwi r29, r1, PTO+PT_R29; \
256 lwi r30, r1, PTO+PT_R30; \
257 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
258
259 .text
260
261 /*
262 * User trap.
263 *
264 * System calls are handled here.
265 *
266 * Syscall protocol:
267 * Syscall number in r12, args in r5-r10
268 * Return value in r3
269 *
270 * Trap entered via brki instruction, so BIP bit is set, and interrupts
271 * are masked. This is nice, means we don't have to CLI before state save
272 */
273 C_ENTRY(_user_exception):
274 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
275 addi r14, r14, 4 /* return address is 4 byte after call */
276 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
277
278 mfs r11, rmsr
279 nop
280 andi r11, r11, MSR_UMS
281 bnei r11, 1f
282
283 /* Kernel-mode state save - kernel execve */
284 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
285 tophys(r1,r11);
286 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
287 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
288
289 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
290 SAVE_REGS
291
292 addi r11, r0, 1; /* Was in kernel-mode. */
293 swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
294 brid 2f;
295 nop; /* Fill delay slot */
296
297 /* User-mode state save. */
298 1:
299 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
300 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
301 tophys(r1,r1);
302 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
303 /* calculate kernel stack pointer from task struct 8k */
304 addik r1, r1, THREAD_SIZE;
305 tophys(r1,r1);
306
307 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
308 SAVE_REGS
309
310 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
311 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
312 swi r11, r1, PTO+PT_R1; /* Store user SP. */
313 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
314 /* Save away the syscall number. */
315 swi r12, r1, PTO+PT_R0;
316 tovirt(r1,r1)
317
318 /* where the trap should return need -8 to adjust for rtsd r15, 8*/
319 /* Jump to the appropriate function for the system call number in r12
320 * (r12 is not preserved), or return an error if r12 is not valid. The LP
321 * register should point to the location where
322 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
323
324 # Step into virtual mode.
325 set_vms;
326 addik r11, r0, 3f
327 rtid r11, 0
328 nop
329 3:
330 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
331 lwi r11, r11, TI_FLAGS /* get flags in thread info */
332 andi r11, r11, _TIF_WORK_SYSCALL_MASK
333 beqi r11, 4f
334
335 addik r3, r0, -ENOSYS
336 swi r3, r1, PTO + PT_R3
337 brlid r15, do_syscall_trace_enter
338 addik r5, r1, PTO + PT_R0
339
340 # do_syscall_trace_enter returns the new syscall nr.
341 addk r12, r0, r3
342 lwi r5, r1, PTO+PT_R5;
343 lwi r6, r1, PTO+PT_R6;
344 lwi r7, r1, PTO+PT_R7;
345 lwi r8, r1, PTO+PT_R8;
346 lwi r9, r1, PTO+PT_R9;
347 lwi r10, r1, PTO+PT_R10;
348 4:
349 /* Jump to the appropriate function for the system call number in r12
350 * (r12 is not preserved), or return an error if r12 is not valid.
351 * The LP register should point to the location where the called function
352 * should return. [note that MAKE_SYS_CALL uses label 1] */
353 /* See if the system call number is valid */
354 addi r11, r12, -__NR_syscalls;
355 bgei r11,5f;
356 /* Figure out which function to use for this system call. */
357 /* Note Microblaze barrel shift is optional, so don't rely on it */
358 add r12, r12, r12; /* convert num -> ptr */
359 add r12, r12, r12;
360
361 #ifdef DEBUG
362 /* Trac syscalls and stored them to r0_ram */
363 lwi r3, r12, 0x400 + r0_ram
364 addi r3, r3, 1
365 swi r3, r12, 0x400 + r0_ram
366 #endif
367
368 # Find and jump into the syscall handler.
369 lwi r12, r12, sys_call_table
370 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
371 addi r15, r0, ret_from_trap-8
372 bra r12
373
374 /* The syscall number is invalid, return an error. */
375 5:
376 addi r3, r0, -ENOSYS;
377 rtsd r15,8; /* looks like a normal subroutine return */
378 or r0, r0, r0
379
380
381 /* Entry point used to return from a syscall/trap */
382 /* We re-enable BIP bit before state restore */
383 C_ENTRY(ret_from_trap):
384 swi r3, r1, PTO + PT_R3
385 swi r4, r1, PTO + PT_R4
386
387 lwi r11, r1, PTO+PT_MODE;
388 /* See if returning to kernel mode, if so, skip resched &c. */
389 bnei r11, 2f;
390 /* We're returning to user mode, so check for various conditions that
391 * trigger rescheduling. */
392 /* FIXME: Restructure all these flag checks. */
393 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
394 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
395 andi r11, r11, _TIF_WORK_SYSCALL_MASK
396 beqi r11, 1f
397
398 brlid r15, do_syscall_trace_leave
399 addik r5, r1, PTO + PT_R0
400 1:
401 /* We're returning to user mode, so check for various conditions that
402 * trigger rescheduling. */
403 /* get thread info from current task */
404 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
405 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
406 andi r11, r11, _TIF_NEED_RESCHED;
407 beqi r11, 5f;
408
409 bralid r15, schedule; /* Call scheduler */
410 nop; /* delay slot */
411
412 /* Maybe handle a signal */
413 5: /* get thread info from current task*/
414 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
415 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
416 andi r11, r11, _TIF_SIGPENDING;
417 beqi r11, 1f; /* Signals to handle, handle them */
418
419 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
420 addi r7, r0, 1; /* Arg 3: int in_syscall */
421 bralid r15, do_signal; /* Handle any signals */
422 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
423
424 /* Finally, return to user state. */
425 1: set_bip; /* Ints masked for state restore */
426 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
427 VM_OFF;
428 tophys(r1,r1);
429 RESTORE_REGS;
430 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
431 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
432 bri 6f;
433
434 /* Return to kernel state. */
435 2: set_bip; /* Ints masked for state restore */
436 VM_OFF;
437 tophys(r1,r1);
438 RESTORE_REGS;
439 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
440 tovirt(r1,r1);
441 6:
442 TRAP_return: /* Make global symbol for debugging */
443 rtbd r14, 0; /* Instructions to return from an IRQ */
444 nop;
445
446
447 /* These syscalls need access to the struct pt_regs on the stack, so we
448 implement them in assembly (they're basically all wrappers anyway). */
449
450 C_ENTRY(sys_fork_wrapper):
	/* fork(2) wrapper: builds the full do_fork() argument list.
	 * fork is a clone with SIGCHLD, reusing the parent's stack
	 * pointer taken from the saved pt_regs; a pointer to the saved
	 * context is passed so do_fork can copy it into the child. */
451 addi r5, r0, SIGCHLD /* Arg 0: flags */
452 lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
453 addik r7, r1, PTO /* Arg 2: parent context */
454 add r8, r0, r0 /* Arg 3: (unused) -- fixed: was "r8." (period), which does not assemble */
455 add r9, r0, r0; /* Arg 4: (unused) */
456 add r10, r0, r0; /* Arg 5: (unused) */
457 brid do_fork /* Do real work (tail-call) */
458 nop;
459
460 /* This is the initial entry point for a new child thread, with an appropriate
461 stack in place that makes it look like the child is in the middle of a
462 syscall. This function is actually `returned to' from switch_thread
463 (copy_thread makes ret_from_fork the return address in each new thread's
464 saved context). */
465 C_ENTRY(ret_from_fork):
	/* First code a new child runs: finish the scheduler hand-off,
	 * then make the fork syscall appear to return 0 in the child
	 * and take the normal trap-return path. */
466 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
467 add r3, r5, r0; /* switch_thread returns the prev task */
468 /* ( in the delay slot ) */
469 add r3, r0, r0; /* Child's fork call should return 0. */
470 brid ret_from_trap; /* Do normal trap return */
471 nop;
472
473 C_ENTRY(sys_vfork):
	/* vfork(2): tail-call the C helper; the delay-slot addik passes
	 * the saved pt_regs pointer as the first argument (r5). */
474 brid microblaze_vfork /* Do real work (tail-call) */
475 addik r5, r1, PTO /* Arg 1: struct pt_regs * (in delay slot) */
476
477 C_ENTRY(sys_clone):
	/* clone(2) wrapper: r5 (flags) and r6 (child SP) arrive from the
	 * syscall; a zero child SP means "reuse the parent's". */
478 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
479 lwi r6, r1, PTO + PT_R1; /* If so, use parent's stack ptr */
480 1: addik r7, r1, PTO; /* Arg 2: parent context */
481 add r8, r0, r0; /* Arg 3: (unused) */
482 add r9, r0, r0; /* Arg 4: (unused) */
483 add r10, r0, r0; /* Arg 5: (unused) */
484 brid do_fork /* Do real work (tail-call) */
485 nop;
486
487 C_ENTRY(sys_execve):
	/* execve(2): r5-r7 (path, argv, envp) pass through untouched;
	 * append the saved pt_regs pointer as the 4th argument. */
488 addik r8, r1, PTO; /* add user context as 4th arg */
489 brid microblaze_execve; /* Do real work (tail-call).*/
490 nop;
491
492 C_ENTRY(sys_rt_sigreturn_wrapper):
	/* rt_sigreturn needs the full pt_regs; stash the live r3/r4 into
	 * the frame first so the C call cannot clobber them, and reload
	 * them afterwards (sigreturn may have rewritten the frame). */
493 swi r3, r1, PTO+PT_R3; /* save r3, r4 into pt_regs across the call */
494 swi r4, r1, PTO+PT_R4;
495 addik r5, r1, PTO; /* add user context as 1st arg */
496 brlid r15, sys_rt_sigreturn /* Do real work */
497 nop;
498 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
499 lwi r4, r1, PTO+PT_R4;
500 bri ret_from_trap /* fall through will not work here due to align */
501 nop;
502
503 /*
504 * HW EXCEPTION routine start
505 */
506
507 #define SAVE_STATE \
508 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
509 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \
510 set_bip; /*equalize initial state for all possible entries*/\
511 clear_eip; \
512 set_ee; \
513 /* See if already in kernel mode.*/ \
514 mfs r11, rmsr; \
515 nop; \
516 andi r11, r11, MSR_UMS; \
517 bnei r11, 1f; \
518 /* Kernel-mode state save. */ \
519 /* Reload kernel stack-ptr. */ \
520 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
521 tophys(r1,r11); \
522 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \
523 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
524 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
525 SAVE_REGS \
526 /* PC, before IRQ/trap - this is one instruction above */ \
527 swi r17, r1, PTO+PT_PC; \
528 \
529 addi r11, r0, 1; /* Was in kernel-mode. */ \
530 swi r11, r1, PTO+PT_MODE; \
531 brid 2f; \
532 nop; /* Fill delay slot */ \
533 1: /* User-mode state save. */ \
534 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
535 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
536 tophys(r1,r1); \
537 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
538 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
539 tophys(r1,r1); \
540 \
541 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
542 SAVE_REGS \
543 /* PC, before IRQ/trap - this is one instruction above FIXME*/ \
544 swi r17, r1, PTO+PT_PC; \
545 \
546 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
547 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
548 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
549 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
550 /* Save away the syscall number. */ \
551 swi r0, r1, PTO+PT_R0; \
552 tovirt(r1,r1)
553
554 C_ENTRY(full_exception_trap):
	/* Generic HW exception entry: save state, then dispatch to the
	 * C handler full_exception() in virtual mode with ESR/FSR. */
555 /* adjust exception address for privileged instruction
556 * for finding where is it */
557 addik r17, r17, -4
558 SAVE_STATE /* Save registers */
559 /* FIXME this can be store directly in PT_ESR reg.
560 * I tested it but there is a fault */
561 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
562 addik r15, r0, ret_from_exc - 8
563 addik r5, r1, PTO /* parameter struct pt_regs * regs */
564 mfs r6, resr /* ESR -> 2nd parameter */
565 nop
566 mfs r7, rfsr; /* save FSR -> 3rd parameter */
567 nop
568 mts rfsr, r0; /* Clear sticky fsr */
569 nop
570 addik r12, r0, full_exception
571 set_vms;
572 rtbd r12, 0; /* enter handler with MMU/virtual mode on */
573 nop;
574
575 /*
576 * Unaligned data trap.
577 *
578 * Unaligned data trap last on 4k page is handled here.
579 *
580 * Trap entered via exception, so EE bit is set, and interrupts
581 * are masked. This is nice, means we don't have to CLI before state save
582 *
583 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
584 */
585 C_ENTRY(unaligned_data_trap):
	/* Slow path for unaligned accesses the low-level handler in
	 * hw_exception_handler.S could not fix up in place. */
586 SAVE_STATE /* Save registers.*/
587 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
588 addik r15, r0, ret_from_exc-8
589 mfs r3, resr /* ESR */
590 nop
591 mfs r4, rear /* EAR */
592 nop
	/* NOTE(review): ESR/EAR are handed over in r3/r4 (not the usual
	 * r5/r6 argument registers) -- the asm helper below appears to
	 * expect that; confirm against hw_exception_handler.S. */
593 addik r7, r1, PTO /* parameter struct pt_regs * regs */
594 addik r12, r0, _unaligned_data_exception
595 set_vms;
596 rtbd r12, 0; /* interrupts enabled */
597 nop;
598
599 /*
600 * Page fault traps.
601 *
602 * If the real exception handler (from hw_exception_handler.S) didn't find
603 * the mapping for the process, then we're thrown here to handle such situation.
604 *
605 * Trap entered via exceptions, so EE bit is set, and interrupts
606 * are masked. This is nice, means we don't have to CLI before state save
607 *
608 * Build a standard exception frame for TLB Access errors. All TLB exceptions
609 * will bail out to this point if they can't resolve the lightweight TLB fault.
610 *
611 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
612 * void do_page_fault(struct pt_regs *regs,
613 * unsigned long address,
614 * unsigned long error_code)
615 */
616 /* data and instruction trap - which one it was is resolved in fault.c */
617 C_ENTRY(page_fault_data_trap):
	/* Data-side TLB miss/protection fault that the lightweight TLB
	 * handler bailed out of: call do_page_fault(regs, EAR, ESR). */
618 SAVE_STATE /* Save registers.*/
619 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
620 addik r15, r0, ret_from_exc-8
621 addik r5, r1, PTO /* parameter struct pt_regs * regs */
622 mfs r6, rear /* parameter unsigned long address */
623 nop
624 mfs r7, resr /* parameter unsigned long error_code */
625 nop
626 addik r12, r0, do_page_fault
627 set_vms;
628 rtbd r12, 0; /* interrupts enabled */
629 nop;
630
631 C_ENTRY(page_fault_instr_trap):
	/* Instruction-side fault: same as the data path, but the error
	 * code is forced to 0 to mark an instruction-fetch fault. */
632 SAVE_STATE /* Save registers.*/
633 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
634 addik r15, r0, ret_from_exc-8
635 addik r5, r1, PTO /* parameter struct pt_regs * regs */
636 mfs r6, rear /* parameter unsigned long address */
637 nop
638 ori r7, r0, 0 /* parameter unsigned long error_code */
639 addik r12, r0, do_page_fault
640 set_vms;
641 rtbd r12, 0; /* interrupts enabled */
642 nop;
643
644 /* Entry point used to return from an exception. */
645 C_ENTRY(ret_from_exc):
646 lwi r11, r1, PTO+PT_MODE;
647 bnei r11, 2f; /* See if returning to kernel mode, */
648 /* ... if so, skip resched &c. */
649
650 /* We're returning to user mode, so check for various conditions that
651 trigger rescheduling. */
652 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
653 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
654 andi r11, r11, _TIF_NEED_RESCHED;
655 beqi r11, 5f;
656
657 /* Call the scheduler before returning from a syscall/trap. */
658 bralid r15, schedule; /* Call scheduler */
659 nop; /* delay slot */
660
661 /* Maybe handle a signal */
662 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
663 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
664 andi r11, r11, _TIF_SIGPENDING;
665 beqi r11, 1f; /* Signals to handle, handle them */
666
667 /*
668 * Handle a signal return; Pending signals should be in r18.
669 *
670 * Not all registers are saved by the normal trap/interrupt entry
671 * points (for instance, call-saved registers (because the normal
672 * C-compiler calling sequence in the kernel makes sure they're
673 * preserved), and call-clobbered registers in the case of
674 * traps), but signal handlers may want to examine or change the
675 * complete register state. Here we save anything not saved by
676 * the normal entry sequence, so that it may be safely restored
677 * (in a possibly modified form) after do_signal returns. */
678 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
679 addi r7, r0, 0; /* Arg 3: int in_syscall */
680 bralid r15, do_signal; /* Handle any signals */
681 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
682
683 /* Finally, return to user state. */
684 1: set_bip; /* Ints masked for state restore */
685 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
686 VM_OFF;
687 tophys(r1,r1);
688
689 RESTORE_REGS;
690 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
691
692 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
693 bri 6f;
694 /* Return to kernel state. */
695 2: set_bip; /* Ints masked for state restore */
696 VM_OFF;
697 tophys(r1,r1);
698 RESTORE_REGS;
699 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
700
701 tovirt(r1,r1);
702 6:
703 EXC_return: /* Make global symbol for debugging */
704 rtbd r14, 0; /* Instructions to return from an IRQ */
705 nop;
706
707 /*
708 * HW EXCEPTION routine end
709 */
710
711 /*
712 * Hardware maskable interrupts.
713 *
714 * The stack-pointer (r1) should have already been saved to the memory
715 * location PER_CPU(ENTRY_SP).
716 */
717 C_ENTRY(_interrupt):
718 /* MS: we are in physical address */
719 /* Save registers, switch to proper stack, convert SP to virtual.*/
720 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
721 swi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
722 /* MS: See if already in kernel mode. */
723 mfs r11, rmsr
724 nop
725 andi r11, r11, MSR_UMS
726 bnei r11, 1f
727
728 /* Kernel-mode state save. */
729 or r11, r1, r0
730 tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
731 /* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
732 swi r11, r1, (PT_R1 - PT_SIZE);
733 /* MS: restore r11 because of saving in SAVE_REGS */
734 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
735 /* save registers */
736 /* MS: Make room on the stack -> activation record */
737 addik r1, r1, -STATE_SAVE_SIZE;
738 SAVE_REGS
739 /* MS: store mode */
740 addi r11, r0, 1; /* MS: Was in kernel-mode. */
741 swi r11, r1, PTO + PT_MODE; /* MS: and save it */
742 brid 2f;
743 nop; /* MS: Fill delay slot */
744
745 1:
746 /* User-mode state save. */
747 /* MS: restore r11 -> FIXME move before SAVE_REG */
748 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
749 /* MS: get the saved current */
750 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
751 tophys(r1,r1);
752 lwi r1, r1, TS_THREAD_INFO;
753 addik r1, r1, THREAD_SIZE;
754 tophys(r1,r1);
755 /* save registers */
756 addik r1, r1, -STATE_SAVE_SIZE;
757 SAVE_REGS
758 /* calculate mode */
759 swi r0, r1, PTO + PT_MODE;
760 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
761 swi r11, r1, PTO+PT_R1;
762 2:
763 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
764 swi r0, r1, PTO + PT_R0;
765 tovirt(r1,r1)
766 addik r5, r1, PTO;
767 set_vms;
768 addik r11, r0, do_IRQ;
769 addik r15, r0, irq_call;
770 irq_call:rtbd r11, 0;
771 nop;
772
773 /* MS: we are in virtual mode */
774 ret_from_irq:
775 lwi r11, r1, PTO + PT_MODE;
776 bnei r11, 2f;
777
778 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
779 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
780 andi r11, r11, _TIF_NEED_RESCHED;
781 beqi r11, 5f
782 bralid r15, schedule;
783 nop; /* delay slot */
784
785 /* Maybe handle a signal */
786 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
787 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
788 andi r11, r11, _TIF_SIGPENDING;
789 beqid r11, no_intr_resched
790 /* Handle a signal return; Pending signals should be in r18. */
791 addi r7, r0, 0; /* Arg 3: int in_syscall */
792 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
793 bralid r15, do_signal; /* Handle any signals */
794 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
795
796 /* Finally, return to user state. */
797 no_intr_resched:
798 /* Disable interrupts, we are now committed to the state restore */
799 disable_irq
800 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
801 VM_OFF;
802 tophys(r1,r1);
803 RESTORE_REGS
804 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
805 lwi r1, r1, PT_R1 - PT_SIZE;
806 bri 6f;
807 /* MS: Return to kernel state. */
808 2:
809 #ifdef CONFIG_PREEMPT
810 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
811 /* MS: get preempt_count from thread info */
812 lwi r5, r11, TI_PREEMPT_COUNT;
813 bgti r5, restore;
814
815 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
816 andi r5, r5, _TIF_NEED_RESCHED;
817 beqi r5, restore /* if zero jump over */
818
819 preempt:
820 /* interrupts are off that's why I am calling preempt_chedule_irq */
821 bralid r15, preempt_schedule_irq
822 nop
823 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
824 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
825 andi r5, r5, _TIF_NEED_RESCHED;
826 bnei r5, preempt /* if non zero jump to resched */
827 restore:
828 #endif
829 VM_OFF /* MS: turn off MMU */
830 tophys(r1,r1)
831 RESTORE_REGS
832 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
833 tovirt(r1,r1);
834 6:
835 IRQ_return: /* MS: Make global symbol for debugging */
836 rtid r14, 0
837 nop
838
839 /*
840 * `Debug' trap
841 * We enter dbtrap in "BIP" (breakpoint) mode.
842 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
843 * original dbtrap.
844 * however, wait to save state first
845 */
846 C_ENTRY(_debug_exception):
847 /* BIP bit is set on entry, no interrupts can occur */
848 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
849
850 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
851 set_bip; /*equalize initial state for all possible entries*/
852 clear_eip;
853 enable_irq;
854 mfs r11, rmsr
855 nop
856 andi r11, r11, MSR_UMS
857 bnei r11, 1f
858 /* Kernel-mode state save. */
859 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
860 tophys(r1,r11);
861 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
862 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
863
864 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
865 SAVE_REGS;
866
867 addi r11, r0, 1; /* Was in kernel-mode. */
868 swi r11, r1, PTO + PT_MODE;
869 brid 2f;
870 nop; /* Fill delay slot */
871 1: /* User-mode state save. */
872 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
873 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
874 tophys(r1,r1);
875 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
876 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
877 tophys(r1,r1);
878
879 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
880 SAVE_REGS;
881
882 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
883 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
884 swi r11, r1, PTO+PT_R1; /* Store user SP. */
885 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
886 /* Save away the syscall number. */
887 swi r0, r1, PTO+PT_R0;
888 tovirt(r1,r1)
889
890 addi r5, r0, SIGTRAP /* send the trap signal */
891 add r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */
892 addk r7, r0, r0 /* 3rd param zero */
893
894 set_vms;
895 addik r11, r0, send_sig;
896 addik r15, r0, dbtrap_call;
897 dbtrap_call: rtbd r11, 0;
898 nop;
899
900 set_bip; /* Ints masked for state restore*/
901 lwi r11, r1, PTO+PT_MODE;
902 bnei r11, 2f;
903
904 /* Get current task ptr into r11 */
905 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
906 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
907 andi r11, r11, _TIF_NEED_RESCHED;
908 beqi r11, 5f;
909
910 /* Call the scheduler before returning from a syscall/trap. */
911
912 bralid r15, schedule; /* Call scheduler */
913 nop; /* delay slot */
914 /* XXX Is PT_DTRACE handling needed here? */
915 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
916
917 /* Maybe handle a signal */
918 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
919 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
920 andi r11, r11, _TIF_SIGPENDING;
921 beqi r11, 1f; /* Signals to handle, handle them */
922
923 /* Handle a signal return; Pending signals should be in r18. */
924 /* Not all registers are saved by the normal trap/interrupt entry
925 points (for instance, call-saved registers (because the normal
926 C-compiler calling sequence in the kernel makes sure they're
927 preserved), and call-clobbered registers in the case of
928 traps), but signal handlers may want to examine or change the
929 complete register state. Here we save anything not saved by
930 the normal entry sequence, so that it may be safely restored
931 (in a possibly modified form) after do_signal returns. */
932
933 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
934 addi r7, r0, 0; /* Arg 3: int in_syscall */
935 bralid r15, do_signal; /* Handle any signals */
936 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
937
938
939 /* Finally, return to user state. */
940 1:
941 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
942 VM_OFF;
943 tophys(r1,r1);
944
945 RESTORE_REGS
946 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
947
948
949 lwi r1, r1, PT_R1 - PT_SIZE;
950 /* Restore user stack pointer. */
951 bri 6f;
952
953 /* Return to kernel state. */
954 2: VM_OFF;
955 tophys(r1,r1);
956 RESTORE_REGS
957 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
958
959 tovirt(r1,r1);
960 6:
961 DBTRAP_return: /* Make global symbol for debugging */
962 rtbd r14, 0; /* Instructions to return from an IRQ */
963 nop;
964
965
966
967 ENTRY(_switch_to)
	/* Context switch. r5/r6 look like thread_info pointers of the
	 * previous/next task (both are indexed with TI_* offsets below)
	 * -- confirm against the C caller's prototype. Volatile regs
	 * are intentionally not saved: the caller already spilled them. */
968 /* prepare return value */
969 addk r3, r0, CURRENT_TASK
970
971 /* save registers in cpu_context */
972 /* use r11 and r12, volatile registers, as temp register */
973 /* r11 = start of previous task's cpu_context */
974 addik r11, r5, TI_CPU_CONTEXT
975 swi r1, r11, CC_R1
976 swi r2, r11, CC_R2
977 /* skip volatile registers.
978 * they are saved on stack when we jumped to _switch_to() */
979 /* dedicated registers */
980 swi r13, r11, CC_R13
981 swi r14, r11, CC_R14
982 swi r15, r11, CC_R15
983 swi r16, r11, CC_R16
984 swi r17, r11, CC_R17
985 swi r18, r11, CC_R18
986 /* save non-volatile registers */
987 swi r19, r11, CC_R19
988 swi r20, r11, CC_R20
989 swi r21, r11, CC_R21
990 swi r22, r11, CC_R22
991 swi r23, r11, CC_R23
992 swi r24, r11, CC_R24
993 swi r25, r11, CC_R25
994 swi r26, r11, CC_R26
995 swi r27, r11, CC_R27
996 swi r28, r11, CC_R28
997 swi r29, r11, CC_R29
998 swi r30, r11, CC_R30
999 /* special purpose registers */
1000 mfs r12, rmsr
1001 nop
1002 swi r12, r11, CC_MSR
1003 mfs r12, rear
1004 nop
1005 swi r12, r11, CC_EAR
1006 mfs r12, resr
1007 nop
1008 swi r12, r11, CC_ESR
1009 mfs r12, rfsr
1010 nop
1011 swi r12, r11, CC_FSR
1012
1013 /* update CURRENT_TASK (r31) with the task that runs next */
1014 lwi CURRENT_TASK, r6, TI_TASK
1015 /* and mirror it into the per-CPU current_save slot */
1016 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
1017
1018 /* get new process' cpu context and restore */
1019 /* r11 = start of next task's cpu_context */
1020 addik r11, r6, TI_CPU_CONTEXT
1021
1022 /* non-volatile registers */
1023 lwi r30, r11, CC_R30
1024 lwi r29, r11, CC_R29
1025 lwi r28, r11, CC_R28
1026 lwi r27, r11, CC_R27
1027 lwi r26, r11, CC_R26
1028 lwi r25, r11, CC_R25
1029 lwi r24, r11, CC_R24
1030 lwi r23, r11, CC_R23
1031 lwi r22, r11, CC_R22
1032 lwi r21, r11, CC_R21
1033 lwi r20, r11, CC_R20
1034 lwi r19, r11, CC_R19
1035 /* dedicated registers */
1036 lwi r18, r11, CC_R18
1037 lwi r17, r11, CC_R17
1038 lwi r16, r11, CC_R16
1039 lwi r15, r11, CC_R15
1040 lwi r14, r11, CC_R14
1041 lwi r13, r11, CC_R13
1042 /* skip volatile registers */
1043 lwi r2, r11, CC_R2
1044 lwi r1, r11, CC_R1
1045
1046 /* special purpose registers */
1047 lwi r12, r11, CC_FSR
1048 mts rfsr, r12
1049 nop
1050 lwi r12, r11, CC_MSR
1051 mts rmsr, r12
1052 nop
1053
1054 rtsd r15, 8
1055 nop
1056
1057 ENTRY(_reset)
	/* Reset entry: hand control back to the FS-boot bootloader. */
1058 brai 0x70; /* Jump back to FS-boot */
1059
1060 ENTRY(_break)
	/* NMI/break handler: dump MSR and ESR to fixed scratch slots in
	 * r0_ram for post-mortem inspection, then halt in a tight loop. */
1061 mfs r5, rmsr
1062 nop
1063 swi r5, r0, 0x250 + TOPHYS(r0_ram) /* saved MSR */
1064 mfs r5, resr
1065 nop
1066 swi r5, r0, 0x254 + TOPHYS(r0_ram) /* saved ESR */
1067 bri 0 /* branch-to-self: spin forever */
1068
1069 /* These are compiled and loaded into high memory, then
1070 * copied into place in mach_early_setup */
1071 .section .init.ivt, "ax"
1072 .org 0x0
1073 /* this is very important - here is the reset vector */
1074 /* in current MMU branch you don't care what is here - it is
1075 * used from bootloader site - but this is correct for FS-BOOT */
1076 brai 0x70
1077 nop
1078 brai TOPHYS(_user_exception); /* syscall handler */
1079 brai TOPHYS(_interrupt); /* Interrupt handler */
1080 brai TOPHYS(_break); /* nmi trap handler */
1081 brai TOPHYS(_hw_exception_handler); /* HW exception handler */
1082
1083 .org 0x60
1084 brai TOPHYS(_debug_exception); /* debug trap handler*/
1085
1086 .section .rodata,"a"
1087 #include "syscall_table.S"
1088
1089 syscall_table_size=(.-sys_call_table)
1090
1091 type_SYSCALL:
1092 .ascii "SYSCALL\0"
1093 type_IRQ:
1094 .ascii "IRQ\0"
1095 type_IRQ_PREEMPT:
1096 .ascii "IRQ (PREEMPTED)\0"
1097 type_SYSCALL_PREEMPT:
1098 .ascii " SYSCALL (PREEMPTED)\0"
1099
1100 /*
1101 * Trap decoding for stack unwinder
1102 * Tuples are (start addr, end addr, string)
1103 * If return address lies on [start addr, end addr],
1104 * unwinder displays 'string'
1105 */
1106
1107 .align 4
1108 .global microblaze_trap_handlers
1109 microblaze_trap_handlers:
1110 /* Exact matches come first */
1111 .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
1112 .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
1113 /* Fuzzy matches go here */
1114 .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
1115 .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
1116 /* End of table */
1117 .word 0 ; .word 0 ; .word 0
This page took 0.053697 seconds and 4 git commands to generate.