microblaze: Put together addik instructions
[deliverable/linux.git] / arch / microblaze / kernel / entry.S
... / ...
CommitLineData
1/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
34#undef DEBUG
35
36/* The size of a state save frame. */
37#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
38
39/* The offset of the struct pt_regs in a `state save frame' on the stack. */
40#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
41
42#define C_ENTRY(name) .globl name; .align 4; name
43
44/*
45 * Various ways of setting and clearing BIP in flags reg.
46 * This is mucky, but necessary using microblaze version that
47 * allows msr ops to write to BIP
48 */
49#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
50 .macro clear_bip
51 msrclr r0, MSR_BIP
52 nop
53 .endm
54
55 .macro set_bip
56 msrset r0, MSR_BIP
57 nop
58 .endm
59
60 .macro clear_eip
61 msrclr r0, MSR_EIP
62 nop
63 .endm
64
65 .macro set_ee
66 msrset r0, MSR_EE
67 nop
68 .endm
69
70 .macro disable_irq
71 msrclr r0, MSR_IE
72 nop
73 .endm
74
75 .macro enable_irq
76 msrset r0, MSR_IE
77 nop
78 .endm
79
80 .macro set_ums
81 msrset r0, MSR_UMS
82 nop
83 msrclr r0, MSR_VMS
84 nop
85 .endm
86
87 .macro set_vms
88 msrclr r0, MSR_UMS
89 nop
90 msrset r0, MSR_VMS
91 nop
92 .endm
93
94 .macro clear_ums
95 msrclr r0, MSR_UMS
96 nop
97 .endm
98
99 .macro clear_vms_ums
100 msrclr r0, MSR_VMS | MSR_UMS
101 nop
102 .endm
103#else
104 .macro clear_bip
105 mfs r11, rmsr
106 nop
107 andi r11, r11, ~MSR_BIP
108 mts rmsr, r11
109 nop
110 .endm
111
112 .macro set_bip
113 mfs r11, rmsr
114 nop
115 ori r11, r11, MSR_BIP
116 mts rmsr, r11
117 nop
118 .endm
119
120 .macro clear_eip
121 mfs r11, rmsr
122 nop
123 andi r11, r11, ~MSR_EIP
124 mts rmsr, r11
125 nop
126 .endm
127
128 .macro set_ee
129 mfs r11, rmsr
130 nop
131 ori r11, r11, MSR_EE
132 mts rmsr, r11
133 nop
134 .endm
135
136 .macro disable_irq
137 mfs r11, rmsr
138 nop
139 andi r11, r11, ~MSR_IE
140 mts rmsr, r11
141 nop
142 .endm
143
144 .macro enable_irq
145 mfs r11, rmsr
146 nop
147 ori r11, r11, MSR_IE
148 mts rmsr, r11
149 nop
150 .endm
151
152 .macro set_ums
153 mfs r11, rmsr
154 nop
155 ori r11, r11, MSR_VMS
156 andni r11, r11, MSR_UMS
157 mts rmsr, r11
158 nop
159 .endm
160
161 .macro set_vms
162 mfs r11, rmsr
163 nop
164 ori r11, r11, MSR_VMS
165 andni r11, r11, MSR_UMS
166 mts rmsr, r11
167 nop
168 .endm
169
170 .macro clear_ums
171 mfs r11, rmsr
172 nop
173 andni r11, r11, MSR_UMS
174 mts rmsr,r11
175 nop
176 .endm
177
178 .macro clear_vms_ums
179 mfs r11, rmsr
180 nop
181 andni r11, r11, (MSR_VMS|MSR_UMS)
182 mts rmsr,r11
183 nop
184 .endm
185#endif
186
187/* Define how to call high-level functions. With MMU, virtual mode must be
188 * enabled when calling the high-level function. Clobbers R11.
189 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
190 */
191
192/* turn on virtual protected mode save */
193#define VM_ON \
194 set_ums; \
195 rted r0, 2f; \
196 nop; \
1972:
198
199/* turn off virtual protected mode save and user mode save*/
200#define VM_OFF \
201 clear_vms_ums; \
202 rted r0, TOPHYS(1f); \
203 nop; \
2041:
205
206#define SAVE_REGS \
207 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
208 swi r3, r1, PTO+PT_R3; \
209 swi r4, r1, PTO+PT_R4; \
210 swi r5, r1, PTO+PT_R5; \
211 swi r6, r1, PTO+PT_R6; \
212 swi r7, r1, PTO+PT_R7; \
213 swi r8, r1, PTO+PT_R8; \
214 swi r9, r1, PTO+PT_R9; \
215 swi r10, r1, PTO+PT_R10; \
216 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
217 swi r12, r1, PTO+PT_R12; \
218 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
219 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
220 swi r15, r1, PTO+PT_R15; /* Save LP */ \
221 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
222 swi r19, r1, PTO+PT_R19; \
223 swi r20, r1, PTO+PT_R20; \
224 swi r21, r1, PTO+PT_R21; \
225 swi r22, r1, PTO+PT_R22; \
226 swi r23, r1, PTO+PT_R23; \
227 swi r24, r1, PTO+PT_R24; \
228 swi r25, r1, PTO+PT_R25; \
229 swi r26, r1, PTO+PT_R26; \
230 swi r27, r1, PTO+PT_R27; \
231 swi r28, r1, PTO+PT_R28; \
232 swi r29, r1, PTO+PT_R29; \
233 swi r30, r1, PTO+PT_R30; \
234 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
235 mfs r11, rmsr; /* save MSR */ \
236 nop; \
237 swi r11, r1, PTO+PT_MSR;
238
239#define RESTORE_REGS \
240 lwi r11, r1, PTO+PT_MSR; \
241 mts rmsr , r11; \
242 nop; \
243 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
244 lwi r3, r1, PTO+PT_R3; \
245 lwi r4, r1, PTO+PT_R4; \
246 lwi r5, r1, PTO+PT_R5; \
247 lwi r6, r1, PTO+PT_R6; \
248 lwi r7, r1, PTO+PT_R7; \
249 lwi r8, r1, PTO+PT_R8; \
250 lwi r9, r1, PTO+PT_R9; \
251 lwi r10, r1, PTO+PT_R10; \
252 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
253 lwi r12, r1, PTO+PT_R12; \
254 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
255 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
256 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
257 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
258 lwi r19, r1, PTO+PT_R19; \
259 lwi r20, r1, PTO+PT_R20; \
260 lwi r21, r1, PTO+PT_R21; \
261 lwi r22, r1, PTO+PT_R22; \
262 lwi r23, r1, PTO+PT_R23; \
263 lwi r24, r1, PTO+PT_R24; \
264 lwi r25, r1, PTO+PT_R25; \
265 lwi r26, r1, PTO+PT_R26; \
266 lwi r27, r1, PTO+PT_R27; \
267 lwi r28, r1, PTO+PT_R28; \
268 lwi r29, r1, PTO+PT_R29; \
269 lwi r30, r1, PTO+PT_R30; \
270 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
271
272#define SAVE_STATE \
273 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
274 /* See if already in kernel mode.*/ \
275 mfs r1, rmsr; \
276 nop; \
277 andi r1, r1, MSR_UMS; \
278 bnei r1, 1f; \
279 /* Kernel-mode state save. */ \
280 /* Reload kernel stack-ptr. */ \
281 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
282 /* FIXME: I can add these two lines to one */ \
283 /* tophys(r1,r1); */ \
284 /* addik r1, r1, -STATE_SAVE_SIZE; */ \
285 addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
286 SAVE_REGS \
287 brid 2f; \
288 swi r1, r1, PTO+PT_MODE; \
2891: /* User-mode state save. */ \
290 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
291 tophys(r1,r1); \
292 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
293 /* MS these three instructions can be added to one */ \
294 /* addik r1, r1, THREAD_SIZE; */ \
295 /* tophys(r1,r1); */ \
296 /* addik r1, r1, -STATE_SAVE_SIZE; */ \
297 addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
298 SAVE_REGS \
299 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
300 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
301 swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \
302 /* MS: I am clearing UMS even in case when I come from kernel space */ \
303 clear_ums; \
3042: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
305
306.text
307
308/*
309 * User trap.
310 *
311 * System calls are handled here.
312 *
313 * Syscall protocol:
314 * Syscall number in r12, args in r5-r10
315 * Return value in r3
316 *
317 * Trap entered via brki instruction, so BIP bit is set, and interrupts
318 * are masked. This is nice, means we don't have to CLI before state save
319 */
320C_ENTRY(_user_exception):
321 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
322 addi r14, r14, 4 /* return address is 4 byte after call */
323
324 mfs r1, rmsr
325 nop
326 andi r1, r1, MSR_UMS
327 bnei r1, 1f
328
329/* Kernel-mode state save - kernel execve */
330 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
331 tophys(r1,r1);
332
333 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
334 SAVE_REGS
335
336 swi r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
337 brid 2f;
338 nop; /* Fill delay slot */
339
340/* User-mode state save. */
3411:
342 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
343 tophys(r1,r1);
344 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
345/* calculate kernel stack pointer from task struct 8k */
346 addik r1, r1, THREAD_SIZE;
347 tophys(r1,r1);
348
349 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
350 SAVE_REGS
351
352 swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
353 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
354 swi r11, r1, PTO+PT_R1; /* Store user SP. */
3552: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
356 /* Save away the syscall number. */
357 swi r12, r1, PTO+PT_R0;
358 tovirt(r1,r1)
359
360/* where the trap should return need -8 to adjust for rtsd r15, 8*/
361/* Jump to the appropriate function for the system call number in r12
362 * (r12 is not preserved), or return an error if r12 is not valid. The LP
363 * register should point to the location where
364 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
365
366 # Step into virtual mode.
367 set_vms;
368 addik r11, r0, 3f
369 rtid r11, 0
370 nop
3713:
372 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
373 lwi r11, r11, TI_FLAGS /* get flags in thread info */
374 andi r11, r11, _TIF_WORK_SYSCALL_MASK
375 beqi r11, 4f
376
377 addik r3, r0, -ENOSYS
378 swi r3, r1, PTO + PT_R3
379 brlid r15, do_syscall_trace_enter
380 addik r5, r1, PTO + PT_R0
381
382 # do_syscall_trace_enter returns the new syscall nr.
383 addk r12, r0, r3
384 lwi r5, r1, PTO+PT_R5;
385 lwi r6, r1, PTO+PT_R6;
386 lwi r7, r1, PTO+PT_R7;
387 lwi r8, r1, PTO+PT_R8;
388 lwi r9, r1, PTO+PT_R9;
389 lwi r10, r1, PTO+PT_R10;
3904:
391/* Jump to the appropriate function for the system call number in r12
392 * (r12 is not preserved), or return an error if r12 is not valid.
393 * The LP register should point to the location where the called function
394 * should return. [note that MAKE_SYS_CALL uses label 1] */
395 /* See if the system call number is valid */
396 addi r11, r12, -__NR_syscalls;
397 bgei r11,5f;
398 /* Figure out which function to use for this system call. */
399 /* Note Microblaze barrel shift is optional, so don't rely on it */
400 add r12, r12, r12; /* convert num -> ptr */
401 add r12, r12, r12;
402
403#ifdef DEBUG
404	/* Trace syscalls and store per-syscall counts in r0_ram */
405	lwi	r3, r12, 0x400 + r0_ram
406	addi	r3, r3, 1		/* bump counter for this syscall number */
407	swi	r3, r12, 0x400 + r0_ram
408#endif
409
410 # Find and jump into the syscall handler.
411 lwi r12, r12, sys_call_table
412 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
413 addi r15, r0, ret_from_trap-8
414 bra r12
415
416 /* The syscall number is invalid, return an error. */
4175:
418 rtsd r15, 8; /* looks like a normal subroutine return */
419 addi r3, r0, -ENOSYS;
420
421/* Entry point used to return from a syscall/trap */
422/* We re-enable BIP bit before state restore */
423C_ENTRY(ret_from_trap):
424 swi r3, r1, PTO + PT_R3
425 swi r4, r1, PTO + PT_R4
426
427 lwi r11, r1, PTO + PT_MODE;
428/* See if returning to kernel mode, if so, skip resched &c. */
429 bnei r11, 2f;
430 /* We're returning to user mode, so check for various conditions that
431 * trigger rescheduling. */
432 /* FIXME: Restructure all these flag checks. */
433 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
434 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
435 andi r11, r11, _TIF_WORK_SYSCALL_MASK
436 beqi r11, 1f
437
438 brlid r15, do_syscall_trace_leave
439 addik r5, r1, PTO + PT_R0
4401:
441 /* We're returning to user mode, so check for various conditions that
442 * trigger rescheduling. */
443 /* get thread info from current task */
444 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
445 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
446 andi r11, r11, _TIF_NEED_RESCHED;
447 beqi r11, 5f;
448
449 bralid r15, schedule; /* Call scheduler */
450 nop; /* delay slot */
451
452	/* Maybe handle a signal */
4535:	/* get thread info from current task*/
454	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
455	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
456	andi	r11, r11, _TIF_SIGPENDING;
457	beqi	r11, 1f;		/* No signals pending - skip signal handling */
458
459	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
460	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
461	bralid	r15, do_signal;	/* Handle any signals */
462	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset (delay slot) */
463
464/* Finally, return to user state. */
4651: set_bip; /* Ints masked for state restore */
466 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
467 VM_OFF;
468 tophys(r1,r1);
469 RESTORE_REGS;
470 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
471 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
472 bri 6f;
473
474/* Return to kernel state. */
4752: set_bip; /* Ints masked for state restore */
476 VM_OFF;
477 tophys(r1,r1);
478 RESTORE_REGS;
479 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
480 tovirt(r1,r1);
4816:
482TRAP_return: /* Make global symbol for debugging */
483 rtbd r14, 0; /* Instructions to return from an IRQ */
484 nop;
485
486
487/* These syscalls need access to the struct pt_regs on the stack, so we
488 implement them in assembly (they're basically all wrappers anyway). */
489
C_ENTRY(sys_fork_wrapper):
	/* fork(2) wrapper: build the do_fork() argument list from the saved
	 * user state and tail-call do_fork(). Syscall interface unchanged. */
	addi	r5, r0, SIGCHLD			/* Arg 1: clone flags = SIGCHLD */
	lwi	r6, r1, PTO+PT_R1		/* Arg 2: child SP (use parent's) */
	addik	r7, r1, PTO			/* Arg 3: parent context (pt_regs) */
	add	r8, r0, r0			/* Arg 4: (unused); was "add r8. r0, r0" - operand typo */
	add	r9, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork				/* Do real work (tail-call) */
	add	r10, r0, r0;			/* Arg 6: (unused), executes in delay slot */
498
499/* This is the initial entry point for a new child thread, with an appropriate
500   stack in place that makes it look like the child is in the middle of a
501   syscall.  This function is actually `returned to' from switch_thread
502   (copy_thread makes ret_from_fork the return address in each new thread's
503   saved context). */
504C_ENTRY(ret_from_fork):
505 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
506 add r3, r5, r0; /* switch_thread returns the prev task */
507 /* ( in the delay slot ) */
508 brid ret_from_trap; /* Do normal trap return */
509 add r3, r0, r0; /* Child's fork call should return 0. */
510
511C_ENTRY(sys_vfork):
512	brid	microblaze_vfork	/* Do real work (tail-call) */
513	addik	r5, r1, PTO		/* Arg 1: struct pt_regs * (executes in brid delay slot) */
514
515C_ENTRY(sys_clone):
516	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
517	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
5181:	addik	r7, r1, PTO;			/* Arg 2: parent context */
519	add	r8, r0, r0;			/* Arg 3: (unused) */
520	add	r9, r0, r0;			/* Arg 4: (unused) */
521	brid	do_fork		/* Do real work (tail-call) */
522	add	r10, r0, r0;			/* Arg 5: (unused), in delay slot */
523
524C_ENTRY(sys_execve):
525	brid	microblaze_execve;	/* Do real work (tail-call).*/
526	addik	r8, r1, PTO;		/* add user context (pt_regs) as 4th arg, in delay slot */
527
528C_ENTRY(sys_rt_sigreturn_wrapper):
529	swi	r3, r1, PTO+PT_R3;	/* save r3, r4 into pt_regs */
530	swi	r4, r1, PTO+PT_R4;
531	brlid	r15, sys_rt_sigreturn	/* Do real work */
532	addik	r5, r1, PTO;		/* add user context as 1st arg (delay slot) */
533	lwi	r3, r1, PTO+PT_R3;	/* restore saved r3, r4 registers */
534	lwi	r4, r1, PTO+PT_R4;
535	bri	ret_from_trap	/* fall through will not work here due to align */
536	nop;
537
538/*
539 * HW EXCEPTION routine start
540 */
541C_ENTRY(full_exception_trap):
542	/* adjust exception address for privileged instruction
543	 * so we can find where it came from */
544	addik	r17, r17, -4
545	SAVE_STATE /* Save registers */
546	/* PC, before IRQ/trap - this is one instruction above */
547	swi	r17, r1, PTO+PT_PC;
548	tovirt(r1,r1)
549	/* FIXME this could be stored directly in the PT_ESR reg.
550	 * I tested it but there is a fault */
551	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
552	addik	r15, r0, ret_from_exc - 8
553	mfs	r6, resr		/* parameter: ESR */
554	nop
555	mfs	r7, rfsr;		/* save FSR */
556	nop
557	mts	rfsr, r0;	/* Clear sticky fsr */
558	nop
559	rted	r0, full_exception
560	addik	r5, r1, PTO		/* parameter struct pt_regs * regs (delay slot) */
561
562/*
563 * Unaligned data trap.
564 *
565 * Unaligned data trap last on 4k page is handled here.
566 *
567 * Trap entered via exception, so EE bit is set, and interrupts
568 * are masked. This is nice, means we don't have to CLI before state save
569 *
570 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
571 */
572C_ENTRY(unaligned_data_trap):
573	/* MS: I have to save r11 value and then restore it because
574	 * set_bip, clear_eip, set_ee use r11 as temp register if MSR
575	 * instructions are not used. We don't need to do if MSR instructions
576	 * are used and they use r0 instead of r11.
577	 * I am using ENTRY_SP which should be primary used only for stack
578	 * pointer saving. */
579	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
580	set_bip;	/* equalize initial state for all possible entries */
581	clear_eip;
582	set_ee;
583	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
584	SAVE_STATE		/* Save registers.*/
585	/* PC, before IRQ/trap - this is one instruction above */
586	swi	r17, r1, PTO+PT_PC;
587	tovirt(r1,r1)
588	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
589	addik	r15, r0, ret_from_exc-8
590	mfs	r3, resr		/* parameter: ESR */
591	nop
592	mfs	r4, rear		/* parameter: EAR */
593	nop
594	rtbd	r0, _unaligned_data_exception
595	addik	r7, r1, PTO		/* parameter struct pt_regs * regs (delay slot) */
596
597/*
598 * Page fault traps.
599 *
600 * If the real exception handler (from hw_exception_handler.S) didn't find
601 * the mapping for the process, then we're thrown here to handle such situation.
602 *
603 * Trap entered via exceptions, so EE bit is set, and interrupts
604 * are masked. This is nice, means we don't have to CLI before state save
605 *
606 * Build a standard exception frame for TLB Access errors. All TLB exceptions
607 * will bail out to this point if they can't resolve the lightweight TLB fault.
608 *
609 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
610 * void do_page_fault(struct pt_regs *regs,
611 * unsigned long address,
612 * unsigned long error_code)
613 */
614/* data and instruction trap - which one it is gets resolved in fault.c */
615C_ENTRY(page_fault_data_trap):
616 SAVE_STATE /* Save registers.*/
617 /* PC, before IRQ/trap - this is one instruction above */
618 swi r17, r1, PTO+PT_PC;
619 tovirt(r1,r1)
620 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
621 addik r15, r0, ret_from_exc-8
622 mfs r6, rear /* parameter unsigned long address */
623 nop
624 mfs r7, resr /* parameter unsigned long error_code */
625 nop
626 rted r0, do_page_fault
627 addik r5, r1, PTO /* parameter struct pt_regs * regs */
628
629C_ENTRY(page_fault_instr_trap):
630 SAVE_STATE /* Save registers.*/
631 /* PC, before IRQ/trap - this is one instruction above */
632 swi r17, r1, PTO+PT_PC;
633 tovirt(r1,r1)
634 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
635 addik r15, r0, ret_from_exc-8
636 mfs r6, rear /* parameter unsigned long address */
637 nop
638 ori r7, r0, 0 /* parameter unsigned long error_code */
639 rted r0, do_page_fault
640 addik r5, r1, PTO /* parameter struct pt_regs * regs */
641
642/* Entry point used to return from an exception. */
643C_ENTRY(ret_from_exc):
644 lwi r11, r1, PTO + PT_MODE;
645 bnei r11, 2f; /* See if returning to kernel mode, */
646 /* ... if so, skip resched &c. */
647
648 /* We're returning to user mode, so check for various conditions that
649 trigger rescheduling. */
650 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
651 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
652 andi r11, r11, _TIF_NEED_RESCHED;
653 beqi r11, 5f;
654
655/* Call the scheduler before returning from a syscall/trap. */
656 bralid r15, schedule; /* Call scheduler */
657 nop; /* delay slot */
658
659	/* Maybe handle a signal */
6605:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
661	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
662	andi	r11, r11, _TIF_SIGPENDING;
663	beqi	r11, 1f;	/* No signals pending - go straight to state restore */
664
665 /*
666 * Handle a signal return; Pending signals should be in r18.
667 *
668 * Not all registers are saved by the normal trap/interrupt entry
669 * points (for instance, call-saved registers (because the normal
670 * C-compiler calling sequence in the kernel makes sure they're
671 * preserved), and call-clobbered registers in the case of
672 * traps), but signal handlers may want to examine or change the
673 * complete register state. Here we save anything not saved by
674 * the normal entry sequence, so that it may be safely restored
675 * (in a possibly modified form) after do_signal returns. */
676 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
677 addi r7, r0, 0; /* Arg 3: int in_syscall */
678 bralid r15, do_signal; /* Handle any signals */
679 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
680
681/* Finally, return to user state. */
6821: set_bip; /* Ints masked for state restore */
683 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
684 VM_OFF;
685 tophys(r1,r1);
686
687 RESTORE_REGS;
688 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
689
690 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
691 bri 6f;
692/* Return to kernel state. */
6932: set_bip; /* Ints masked for state restore */
694 VM_OFF;
695 tophys(r1,r1);
696 RESTORE_REGS;
697 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
698
699 tovirt(r1,r1);
7006:
701EXC_return: /* Make global symbol for debugging */
702 rtbd r14, 0; /* Instructions to return from an IRQ */
703 nop;
704
705/*
706 * HW EXCEPTION routine end
707 */
708
709/*
710 * Hardware maskable interrupts.
711 *
712 * The stack-pointer (r1) should have already been saved to the memory
713 * location PER_CPU(ENTRY_SP).
714 */
715C_ENTRY(_interrupt):
716/* MS: we are in physical address */
717/* Save registers, switch to proper stack, convert SP to virtual.*/
718 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
719 /* MS: See if already in kernel mode. */
720 mfs r1, rmsr
721 nop
722 andi r1, r1, MSR_UMS
723 bnei r1, 1f
724
725/* Kernel-mode state save. */
726 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
727 tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
728 /* save registers */
729/* MS: Make room on the stack -> activation record */
730 addik r1, r1, -STATE_SAVE_SIZE;
731 SAVE_REGS
732 swi r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */
733 brid 2f;
734 nop; /* MS: Fill delay slot */
735
7361:
737/* User-mode state save. */
738 /* MS: get the saved current */
739 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
740 tophys(r1,r1);
741 lwi r1, r1, TS_THREAD_INFO;
742 addik r1, r1, THREAD_SIZE;
743 tophys(r1,r1);
744 /* save registers */
745 addik r1, r1, -STATE_SAVE_SIZE;
746 SAVE_REGS
747 /* calculate mode */
748 swi r0, r1, PTO + PT_MODE;
749 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
750 swi r11, r1, PTO+PT_R1;
7512:
752 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
753 tovirt(r1,r1)
754 addik r5, r1, PTO;
755 set_vms;
756 addik r11, r0, do_IRQ;
757 addik r15, r0, irq_call;
758irq_call:rtbd r11, 0;
759 nop;
760
761/* MS: we are in virtual mode */
762ret_from_irq:
763 lwi r11, r1, PTO + PT_MODE;
764 bnei r11, 2f;
765
766 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
767 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
768 andi r11, r11, _TIF_NEED_RESCHED;
769 beqi r11, 5f
770 bralid r15, schedule;
771 nop; /* delay slot */
772
773 /* Maybe handle a signal */
7745: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
775 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
776 andi r11, r11, _TIF_SIGPENDING;
777 beqid r11, no_intr_resched
778/* Handle a signal return; Pending signals should be in r18. */
779 addi r7, r0, 0; /* Arg 3: int in_syscall */
780 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
781 bralid r15, do_signal; /* Handle any signals */
782 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
783
784/* Finally, return to user state. */
785no_intr_resched:
786 /* Disable interrupts, we are now committed to the state restore */
787 disable_irq
788 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
789 VM_OFF;
790 tophys(r1,r1);
791 RESTORE_REGS
792 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
793 lwi r1, r1, PT_R1 - PT_SIZE;
794 bri 6f;
795/* MS: Return to kernel state. */
7962:
797#ifdef CONFIG_PREEMPT
798 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
799 /* MS: get preempt_count from thread info */
800 lwi r5, r11, TI_PREEMPT_COUNT;
801 bgti r5, restore;
802
803 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
804 andi r5, r5, _TIF_NEED_RESCHED;
805 beqi r5, restore /* if zero jump over */
806
807preempt:
808	/* interrupts are off - that's why we call preempt_schedule_irq */
809	bralid	r15, preempt_schedule_irq
810	nop
811	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
812	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
813	andi	r5, r5, _TIF_NEED_RESCHED;
814	bnei	r5, preempt	/* if non zero jump to resched */
815restore:
816#endif
817 VM_OFF /* MS: turn off MMU */
818 tophys(r1,r1)
819 RESTORE_REGS
820 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
821 tovirt(r1,r1);
8226:
823IRQ_return: /* MS: Make global symbol for debugging */
824 rtid r14, 0
825 nop
826
827/*
828 * `Debug' trap
829 * We enter dbtrap in "BIP" (breakpoint) mode.
830 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
831 * original dbtrap.
832 * however, wait to save state first
833 */
834C_ENTRY(_debug_exception):
835 /* BIP bit is set on entry, no interrupts can occur */
836 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
837
838 mfs r1, rmsr
839 nop
840 andi r1, r1, MSR_UMS
841 bnei r1, 1f
842 /* Kernel-mode state save. */
843 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
844 tophys(r1,r1);
845
846 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
847 SAVE_REGS;
848
849 swi r1, r1, PTO + PT_MODE;
850 brid 2f;
851 nop; /* Fill delay slot */
8521: /* User-mode state save. */
853 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
854 tophys(r1,r1);
855 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
856 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
857 tophys(r1,r1);
858
859 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
860 SAVE_REGS;
861
862 swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
863 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
864 swi r11, r1, PTO+PT_R1; /* Store user SP. */
8652:
866 tovirt(r1,r1)
867
868 set_vms;
869 addi r5, r0, SIGTRAP /* send the trap signal */
870 add r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */
871 addk r7, r0, r0 /* 3rd param zero */
872dbtrap_call: rtbd r0, send_sig;
873 addik r15, r0, dbtrap_call;
874
875 set_bip; /* Ints masked for state restore*/
876 lwi r11, r1, PTO + PT_MODE;
877 bnei r11, 2f;
878
879 /* Get current task ptr into r11 */
880 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
881 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
882 andi r11, r11, _TIF_NEED_RESCHED;
883 beqi r11, 5f;
884
885/* Call the scheduler before returning from a syscall/trap. */
886
887 bralid r15, schedule; /* Call scheduler */
888 nop; /* delay slot */
889 /* XXX Is PT_DTRACE handling needed here? */
890 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
891
892	/* Maybe handle a signal */
8935:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
894	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
895	andi	r11, r11, _TIF_SIGPENDING;
896	beqi	r11, 1f;	/* No signals pending - skip to state restore */
897
898/* Handle a signal return; Pending signals should be in r18. */
899 /* Not all registers are saved by the normal trap/interrupt entry
900 points (for instance, call-saved registers (because the normal
901 C-compiler calling sequence in the kernel makes sure they're
902 preserved), and call-clobbered registers in the case of
903 traps), but signal handlers may want to examine or change the
904 complete register state. Here we save anything not saved by
905 the normal entry sequence, so that it may be safely restored
906 (in a possibly modified form) after do_signal returns. */
907
908 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
909 addi r7, r0, 0; /* Arg 3: int in_syscall */
910 bralid r15, do_signal; /* Handle any signals */
911 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
912
913
914/* Finally, return to user state. */
9151:
916 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
917 VM_OFF;
918 tophys(r1,r1);
919
920 RESTORE_REGS
921 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
922
923
924 lwi r1, r1, PT_R1 - PT_SIZE;
925 /* Restore user stack pointer. */
926 bri 6f;
927
928/* Return to kernel state. */
9292: VM_OFF;
930 tophys(r1,r1);
931 RESTORE_REGS
932 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
933
934 tovirt(r1,r1);
9356:
936DBTRAP_return: /* Make global symbol for debugging */
937 rtbd r14, 0; /* Instructions to return from an IRQ */
938 nop;
939
940
941
942ENTRY(_switch_to)
943 /* prepare return value */
944 addk r3, r0, CURRENT_TASK
945
946 /* save registers in cpu_context */
947 /* use r11 and r12, volatile registers, as temp register */
948 /* give start of cpu_context for previous process */
949 addik r11, r5, TI_CPU_CONTEXT
950 swi r1, r11, CC_R1
951 swi r2, r11, CC_R2
952 /* skip volatile registers.
953 * they are saved on stack when we jumped to _switch_to() */
954 /* dedicated registers */
955 swi r13, r11, CC_R13
956 swi r14, r11, CC_R14
957 swi r15, r11, CC_R15
958 swi r16, r11, CC_R16
959 swi r17, r11, CC_R17
960 swi r18, r11, CC_R18
961 /* save non-volatile registers */
962 swi r19, r11, CC_R19
963 swi r20, r11, CC_R20
964 swi r21, r11, CC_R21
965 swi r22, r11, CC_R22
966 swi r23, r11, CC_R23
967 swi r24, r11, CC_R24
968 swi r25, r11, CC_R25
969 swi r26, r11, CC_R26
970 swi r27, r11, CC_R27
971 swi r28, r11, CC_R28
972 swi r29, r11, CC_R29
973 swi r30, r11, CC_R30
974 /* special purpose registers */
975 mfs r12, rmsr
976 nop
977 swi r12, r11, CC_MSR
978 mfs r12, rear
979 nop
980 swi r12, r11, CC_EAR
981 mfs r12, resr
982 nop
983 swi r12, r11, CC_ESR
984 mfs r12, rfsr
985 nop
986 swi r12, r11, CC_FSR
987
988 /* update r31, the current-give me pointer to task which will be next */
989 lwi CURRENT_TASK, r6, TI_TASK
990 /* stored it to current_save too */
991 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
992
993 /* get new process' cpu context and restore */
994 /* give me start where start context of next task */
995 addik r11, r6, TI_CPU_CONTEXT
996
997 /* non-volatile registers */
998 lwi r30, r11, CC_R30
999 lwi r29, r11, CC_R29
1000 lwi r28, r11, CC_R28
1001 lwi r27, r11, CC_R27
1002 lwi r26, r11, CC_R26
1003 lwi r25, r11, CC_R25
1004 lwi r24, r11, CC_R24
1005 lwi r23, r11, CC_R23
1006 lwi r22, r11, CC_R22
1007 lwi r21, r11, CC_R21
1008 lwi r20, r11, CC_R20
1009 lwi r19, r11, CC_R19
1010 /* dedicated registers */
1011 lwi r18, r11, CC_R18
1012 lwi r17, r11, CC_R17
1013 lwi r16, r11, CC_R16
1014 lwi r15, r11, CC_R15
1015 lwi r14, r11, CC_R14
1016 lwi r13, r11, CC_R13
1017 /* skip volatile registers */
1018 lwi r2, r11, CC_R2
1019 lwi r1, r11, CC_R1
1020
1021 /* special purpose registers */
1022 lwi r12, r11, CC_FSR
1023 mts rfsr, r12
1024 nop
1025 lwi r12, r11, CC_MSR
1026 mts rmsr, r12
1027 nop
1028
1029 rtsd r15, 8
1030 nop
1031
1032ENTRY(_reset)
1033 brai 0x70; /* Jump back to FS-boot */
1034
1035ENTRY(_break):
1036	mfs	r5, rmsr		/* dump MSR... */
1037	nop
1038	swi	r5, r0, 0x250 + TOPHYS(r0_ram)	/* ...into r0_ram scratch word */
1039	mfs	r5, resr		/* dump ESR likewise */
1040	nop
1041	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
1042	bri	0	/* NOTE(review): branches to the reset vector; looks like a deliberate halt on NMI/break - confirm */
1043
1044 /* These are compiled and loaded into high memory, then
1045 * copied into place in mach_early_setup */
1046 .section .init.ivt, "ax"
1047 .org 0x0
1048 /* this is very important - here is the reset vector */
1049 /* in current MMU branch you don't care what is here - it is
1050 * used from bootloader site - but this is correct for FS-BOOT */
1051 brai 0x70
1052 nop
1053 brai TOPHYS(_user_exception); /* syscall handler */
1054 brai TOPHYS(_interrupt); /* Interrupt handler */
1055 brai TOPHYS(_break); /* nmi trap handler */
1056 brai TOPHYS(_hw_exception_handler); /* HW exception handler */
1057
1058 .org 0x60
1059 brai TOPHYS(_debug_exception); /* debug trap handler*/
1060
1061.section .rodata,"a"
1062#include "syscall_table.S"
1063
1064syscall_table_size=(.-sys_call_table)
1065
1066type_SYSCALL:
1067 .ascii "SYSCALL\0"
1068type_IRQ:
1069 .ascii "IRQ\0"
1070type_IRQ_PREEMPT:
1071 .ascii "IRQ (PREEMPTED)\0"
1072type_SYSCALL_PREEMPT:
1073 .ascii " SYSCALL (PREEMPTED)\0"
1074
1075 /*
1076 * Trap decoding for stack unwinder
1077 * Tuples are (start addr, end addr, string)
1078 * If return address lies on [start addr, end addr],
1079 * unwinder displays 'string'
1080 */
1081
1082 .align 4
1083.global microblaze_trap_handlers
1084microblaze_trap_handlers:
1085 /* Exact matches come first */
1086 .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
1087 .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
1088 /* Fuzzy matches go here */
1089 .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
1090 .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
1091 /* End of table */
1092 .word 0 ; .word 0 ; .word 0
This page took 0.032477 seconds and 5 git commands to generate.