Add CONFIG_AUDITSC and CONFIG_SECCOMP support for ppc32
1 /*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22 #include <linux/config.h>
23 #include <linux/errno.h>
24 #include <linux/sys.h>
25 #include <linux/threads.h>
26 #include <asm/processor.h>
27 #include <asm/page.h>
28 #include <asm/mmu.h>
29 #include <asm/cputable.h>
30 #include <asm/thread_info.h>
31 #include <asm/ppc_asm.h>
32 #include <asm/offsets.h>
33 #include <asm/unistd.h>
34
35 #undef SHOW_SYSCALLS
36 #undef SHOW_SYSCALLS_TASK
37
38 /*
39  * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
40 */
41 #if MSR_KERNEL >= 0x10000
42 #define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
43 #else
44 #define LOAD_MSR_KERNEL(r, x) li r,(x)
45 #endif
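/*
 * Rationale (editorial note): li only takes a sign-extended 16-bit immediate,
 * so once MSR_KERNEL no longer fits in 16 bits it has to be built with a
 * lis/ori pair.  Illustrative expansion of the macro above:
 *
 *	LOAD_MSR_KERNEL(r10, MSR_KERNEL)
 *		-> lis r10,MSR_KERNEL@h ; ori r10,r10,MSR_KERNEL@l	(MSR_KERNEL >= 0x10000)
 *		-> li  r10,MSR_KERNEL					(otherwise)
 */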
46
47 #ifdef CONFIG_BOOKE
48 #include "head_booke.h"
49 .globl mcheck_transfer_to_handler
50 mcheck_transfer_to_handler:
51 mtspr MCHECK_SPRG,r8
52 BOOKE_LOAD_MCHECK_STACK
53 lwz r0,GPR10-INT_FRAME_SIZE(r8)
54 stw r0,GPR10(r11)
55 lwz r0,GPR11-INT_FRAME_SIZE(r8)
56 stw r0,GPR11(r11)
57 mfspr r8,MCHECK_SPRG
58 b transfer_to_handler_full
59
60 .globl crit_transfer_to_handler
61 crit_transfer_to_handler:
62 mtspr CRIT_SPRG,r8
63 BOOKE_LOAD_CRIT_STACK
64 lwz r0,GPR10-INT_FRAME_SIZE(r8)
65 stw r0,GPR10(r11)
66 lwz r0,GPR11-INT_FRAME_SIZE(r8)
67 stw r0,GPR11(r11)
68 mfspr r8,CRIT_SPRG
69 /* fall through */
70 #endif
71
72 #ifdef CONFIG_40x
73 .globl crit_transfer_to_handler
74 crit_transfer_to_handler:
75 lwz r0,crit_r10@l(0)
76 stw r0,GPR10(r11)
77 lwz r0,crit_r11@l(0)
78 stw r0,GPR11(r11)
79 /* fall through */
80 #endif
81
82 /*
83 * This code finishes saving the registers to the exception frame
84 * and jumps to the appropriate handler for the exception, turning
85 * on address translation.
86 * Note that we rely on the caller having set cr0.eq iff the exception
87 * occurred in kernel mode (i.e. MSR:PR = 0).
88 */
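/*
 * Call-site convention (illustrative sketch; the real call sites are
 * typically generated by the EXC_XFER_* macros in head*.S, and
 * "SomeHandler" below is only a placeholder name):
 *
 *	bl	transfer_to_handler
 *	.long	SomeHandler		-- virtual address of the C handler
 *	.long	ret_from_except		-- where to go when the handler returns
 *
 * Label "3:" below picks these two words up through the saved LR, which is
 * why they are fetched with lwz 0(r9)/4(r9) after the mflr.
 */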
89 .globl transfer_to_handler_full
90 transfer_to_handler_full:
91 SAVE_NVGPRS(r11)
92 /* fall through */
93
94 .globl transfer_to_handler
95 transfer_to_handler:
96 stw r2,GPR2(r11)
97 stw r12,_NIP(r11)
98 stw r9,_MSR(r11)
99 andi. r2,r9,MSR_PR
100 mfctr r12
101 mfspr r2,SPRN_XER
102 stw r12,_CTR(r11)
103 stw r2,_XER(r11)
104 mfspr r12,SPRN_SPRG3
105 addi r2,r12,-THREAD
106 tovirt(r2,r2) /* set r2 to current */
107 beq 2f /* if from user, fix up THREAD.regs */
108 addi r11,r1,STACK_FRAME_OVERHEAD
109 stw r11,PT_REGS(r12)
110 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
111 /* Check to see if the dbcr0 register is set up to debug. Use the
112 single-step bit to do this. */
113 lwz r12,THREAD_DBCR0(r12)
114 andis. r12,r12,DBCR0_IC@h
115 beq+ 3f
116 /* From user and task is ptraced - load up global dbcr0 */
117 li r12,-1 /* clear all pending debug events */
118 mtspr SPRN_DBSR,r12
119 lis r11,global_dbcr0@ha
120 tophys(r11,r11)
121 addi r11,r11,global_dbcr0@l
122 lwz r12,0(r11)
123 mtspr SPRN_DBCR0,r12
124 lwz r12,4(r11)
125 addi r12,r12,-1
126 stw r12,4(r11)
127 #endif
128 b 3f
129 2: /* if from kernel, check interrupted DOZE/NAP mode and
130 * check for stack overflow
131 */
132 #ifdef CONFIG_6xx
133 mfspr r11,SPRN_HID0
134 mtcr r11
135 BEGIN_FTR_SECTION
136 bt- 8,power_save_6xx_restore /* Check DOZE */
137 END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
138 BEGIN_FTR_SECTION
139 bt- 9,power_save_6xx_restore /* Check NAP */
140 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
141 #endif /* CONFIG_6xx */
142 .globl transfer_to_handler_cont
143 transfer_to_handler_cont:
144 lwz r11,THREAD_INFO-THREAD(r12)
145 cmplw r1,r11 /* if r1 <= current->thread_info */
146 ble- stack_ovf /* then the kernel stack overflowed */
147 3:
148 mflr r9
149 lwz r11,0(r9) /* virtual address of handler */
150 lwz r9,4(r9) /* where to go when done */
151 FIX_SRR1(r10,r12)
152 mtspr SPRN_SRR0,r11
153 mtspr SPRN_SRR1,r10
154 mtlr r9
155 SYNC
156 RFI /* jump to handler, enable MMU */
157
158 /*
159 * On kernel stack overflow, load up an initial stack pointer
160 * and call StackOverflow(regs), which should not return.
161 */
162 stack_ovf:
163 /* sometimes we use a statically-allocated stack, which is OK. */
164 lis r11,_end@h
165 ori r11,r11,_end@l
166 cmplw r1,r11
167 ble 3b /* r1 <= &_end is OK */
168 SAVE_NVGPRS(r11)
169 addi r3,r1,STACK_FRAME_OVERHEAD
170 lis r1,init_thread_union@ha
171 addi r1,r1,init_thread_union@l
172 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
173 lis r9,StackOverflow@ha
174 addi r9,r9,StackOverflow@l
175 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
176 FIX_SRR1(r10,r12)
177 mtspr SPRN_SRR0,r9
178 mtspr SPRN_SRR1,r10
179 SYNC
180 RFI
181
182 /*
183 * Handle a system call.
184 */
185 .stabs "arch/ppc/kernel/",N_SO,0,0,0f
186 .stabs "entry.S",N_SO,0,0,0f
187 0:
188
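/*
 * Error-return convention applied in ret_from_syscall below, roughly
 * (pseudo-C sketch for orientation only):
 *
 *	if ((unsigned long)rc >= (unsigned long)-_LAST_ERRNO &&
 *	    !(ti->local_flags & _TIFL_FORCE_NOERROR)) {
 *		regs->ccr |= 0x10000000;	-- set CR0.SO: error
 *		rc = -rc;			-- positive errno for user space
 *	}
 */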
189 _GLOBAL(DoSyscall)
190 stw r0,THREAD+LAST_SYSCALL(r2)
191 stw r3,ORIG_GPR3(r1)
192 li r12,0
193 stw r12,RESULT(r1)
194 lwz r11,_CCR(r1) /* Clear SO bit in CR */
195 rlwinm r11,r11,0,4,2
196 stw r11,_CCR(r1)
197 #ifdef SHOW_SYSCALLS
198 bl do_show_syscall
199 #endif /* SHOW_SYSCALLS */
200 rlwinm r10,r1,0,0,18 /* current_thread_info() */
201 lwz r11,TI_LOCAL_FLAGS(r10)
202 rlwinm r11,r11,0,~_TIFL_FORCE_NOERROR
203 stw r11,TI_LOCAL_FLAGS(r10)
204 lwz r11,TI_FLAGS(r10)
205 andi. r11,r11,_TIF_SYSCALL_T_OR_A
206 bne- syscall_dotrace
207 syscall_dotrace_cont:
208 cmplwi 0,r0,NR_syscalls
209 lis r10,sys_call_table@h
210 ori r10,r10,sys_call_table@l
211 slwi r0,r0,2
212 bge- 66f
213 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
214 mtlr r10
215 addi r9,r1,STACK_FRAME_OVERHEAD
216 blrl /* Call handler */
217 .globl ret_from_syscall
218 ret_from_syscall:
219 #ifdef SHOW_SYSCALLS
220 bl do_show_syscall_exit
221 #endif
222 mr r6,r3
223 li r11,-_LAST_ERRNO
224 cmplw 0,r3,r11
225 rlwinm r12,r1,0,0,18 /* current_thread_info() */
226 blt+ 30f
227 lwz r11,TI_LOCAL_FLAGS(r12)
228 andi. r11,r11,_TIFL_FORCE_NOERROR
229 bne 30f
230 neg r3,r3
231 lwz r10,_CCR(r1) /* Set SO bit in CR */
232 oris r10,r10,0x1000
233 stw r10,_CCR(r1)
234
235 /* disable interrupts so current_thread_info()->flags can't change */
236 30: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
237 SYNC
238 MTMSRD(r10)
239 lwz r9,TI_FLAGS(r12)
240 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
241 bne- syscall_exit_work
242 syscall_exit_cont:
243 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
244 /* If the process has its own DBCR0 value, load it up. The single
245 step bit tells us that dbcr0 should be loaded. */
246 lwz r0,THREAD+THREAD_DBCR0(r2)
247 andis. r10,r0,DBCR0_IC@h
248 bnel- load_dbcr0
249 #endif
250 stwcx. r0,0,r1 /* to clear the reservation */
251 lwz r4,_LINK(r1)
252 lwz r5,_CCR(r1)
253 mtlr r4
254 mtcr r5
255 lwz r7,_NIP(r1)
256 lwz r8,_MSR(r1)
257 FIX_SRR1(r8, r0)
258 lwz r2,GPR2(r1)
259 lwz r1,GPR1(r1)
260 mtspr SPRN_SRR0,r7
261 mtspr SPRN_SRR1,r8
262 SYNC
263 RFI
264
265 66: li r3,-ENOSYS
266 b ret_from_syscall
267
268 .globl ret_from_fork
269 ret_from_fork:
270 REST_NVGPRS(r1)
271 bl schedule_tail
272 li r3,0
273 b ret_from_syscall
274
275 /* Traced system call support */
276 syscall_dotrace:
277 SAVE_NVGPRS(r1)
278 li r0,0xc00
279 stw r0,TRAP(r1)
280 addi r3,r1,STACK_FRAME_OVERHEAD
281 bl do_syscall_trace_enter
282 lwz r0,GPR0(r1) /* Restore original registers */
283 lwz r3,GPR3(r1)
284 lwz r4,GPR4(r1)
285 lwz r5,GPR5(r1)
286 lwz r6,GPR6(r1)
287 lwz r7,GPR7(r1)
288 lwz r8,GPR8(r1)
289 REST_NVGPRS(r1)
290 b syscall_dotrace_cont
291
292 syscall_exit_work:
293 stw r6,RESULT(r1) /* Save result */
294 stw r3,GPR3(r1) /* Update return value */
295 andi. r0,r9,_TIF_SYSCALL_T_OR_A
296 beq 5f
297 ori r10,r10,MSR_EE
298 SYNC
299 MTMSRD(r10) /* re-enable interrupts */
300 lwz r4,TRAP(r1)
301 andi. r4,r4,1
302 beq 4f
303 SAVE_NVGPRS(r1)
304 li r4,0xc00
305 stw r4,TRAP(r1)
306 4:
307 addi r3,r1,STACK_FRAME_OVERHEAD
308 bl do_syscall_trace_leave
309 REST_NVGPRS(r1)
310 2:
311 lwz r3,GPR3(r1)
312 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
313 SYNC
314 MTMSRD(r10) /* disable interrupts again */
315 rlwinm r12,r1,0,0,18 /* current_thread_info() */
316 lwz r9,TI_FLAGS(r12)
317 5:
318 andi. r0,r9,_TIF_NEED_RESCHED
319 bne 1f
320 lwz r5,_MSR(r1)
321 andi. r5,r5,MSR_PR
322 beq syscall_exit_cont
323 andi. r0,r9,_TIF_SIGPENDING
324 beq syscall_exit_cont
325 b do_user_signal
326 1:
327 ori r10,r10,MSR_EE
328 SYNC
329 MTMSRD(r10) /* re-enable interrupts */
330 bl schedule
331 b 2b
332
333 #ifdef SHOW_SYSCALLS
334 do_show_syscall:
335 #ifdef SHOW_SYSCALLS_TASK
336 lis r11,show_syscalls_task@ha
337 lwz r11,show_syscalls_task@l(r11)
338 cmp 0,r2,r11
339 bnelr
340 #endif
341 stw r31,GPR31(r1)
342 mflr r31
343 lis r3,7f@ha
344 addi r3,r3,7f@l
345 lwz r4,GPR0(r1)
346 lwz r5,GPR3(r1)
347 lwz r6,GPR4(r1)
348 lwz r7,GPR5(r1)
349 lwz r8,GPR6(r1)
350 lwz r9,GPR7(r1)
351 bl printk
352 lis r3,77f@ha
353 addi r3,r3,77f@l
354 lwz r4,GPR8(r1)
355 mr r5,r2
356 bl printk
357 lwz r0,GPR0(r1)
358 lwz r3,GPR3(r1)
359 lwz r4,GPR4(r1)
360 lwz r5,GPR5(r1)
361 lwz r6,GPR6(r1)
362 lwz r7,GPR7(r1)
363 lwz r8,GPR8(r1)
364 mtlr r31
365 lwz r31,GPR31(r1)
366 blr
367
368 do_show_syscall_exit:
369 #ifdef SHOW_SYSCALLS_TASK
370 lis r11,show_syscalls_task@ha
371 lwz r11,show_syscalls_task@l(r11)
372 cmp 0,r2,r11
373 bnelr
374 #endif
375 stw r31,GPR31(r1)
376 mflr r31
377 stw r3,RESULT(r1) /* Save result */
378 mr r4,r3
379 lis r3,79f@ha
380 addi r3,r3,79f@l
381 bl printk
382 lwz r3,RESULT(r1)
383 mtlr r31
384 lwz r31,GPR31(r1)
385 blr
386
387 7: .string "syscall %d(%x, %x, %x, %x, %x, "
388 77: .string "%x), current=%p\n"
389 79: .string " -> %x\n"
390 .align 2,0
391
392 #ifdef SHOW_SYSCALLS_TASK
393 .data
394 .globl show_syscalls_task
395 show_syscalls_task:
396 .long -1
397 .text
398 #endif
399 #endif /* SHOW_SYSCALLS */
400
401 /*
402 * The sigsuspend and rt_sigsuspend system calls can call do_signal
403 * and thus put the process into the stopped state where we might
404 * want to examine its user state with ptrace. Therefore we need
405 * to save all the nonvolatile registers (r13 - r31) before calling
406 * the C code.
407 */
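/*
 * Frame-marking convention used by the stubs below and by the other exit
 * paths in this file: the low bit of the TRAP word set means only the
 * volatile registers are in the frame; clearing it advertises that
 * SAVE_NVGPRS has run.  Roughly (pseudo-C sketch):
 *
 *	if (regs->trap & 1) {
 *		save_nvgprs(regs);
 *		regs->trap &= ~1;
 *	}
 */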
408 .globl ppc_sigsuspend
409 ppc_sigsuspend:
410 SAVE_NVGPRS(r1)
411 lwz r0,TRAP(r1)
412 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
413 stw r0,TRAP(r1) /* register set saved */
414 b sys_sigsuspend
415
416 .globl ppc_rt_sigsuspend
417 ppc_rt_sigsuspend:
418 SAVE_NVGPRS(r1)
419 lwz r0,TRAP(r1)
420 rlwinm r0,r0,0,0,30
421 stw r0,TRAP(r1)
422 b sys_rt_sigsuspend
423
424 .globl ppc_fork
425 ppc_fork:
426 SAVE_NVGPRS(r1)
427 lwz r0,TRAP(r1)
428 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
429 stw r0,TRAP(r1) /* register set saved */
430 b sys_fork
431
432 .globl ppc_vfork
433 ppc_vfork:
434 SAVE_NVGPRS(r1)
435 lwz r0,TRAP(r1)
436 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
437 stw r0,TRAP(r1) /* register set saved */
438 b sys_vfork
439
440 .globl ppc_clone
441 ppc_clone:
442 SAVE_NVGPRS(r1)
443 lwz r0,TRAP(r1)
444 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
445 stw r0,TRAP(r1) /* register set saved */
446 b sys_clone
447
448 .globl ppc_swapcontext
449 ppc_swapcontext:
450 SAVE_NVGPRS(r1)
451 lwz r0,TRAP(r1)
452 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
453 stw r0,TRAP(r1) /* register set saved */
454 b sys_swapcontext
455
456 /*
457 * Top-level page fault handling.
458 * This is in assembler because if do_page_fault tells us that
459 * it is a bad kernel page fault, we want to save the non-volatile
460 * registers before calling bad_page_fault.
461 */
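/*
 * Rough C equivalent of the sequence below (sketch only; argument names are
 * illustrative and r4/r5 are assumed to already hold the fault address and
 * error code from the exception prologue):
 *
 *	sig = do_page_fault(regs, address, error_code);
 *	if (sig == 0)
 *		return;				-- normal exception exit
 *	save_nvgprs(regs);
 *	bad_page_fault(regs, address, sig);	-- bad kernel fault path
 */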
462 .globl handle_page_fault
463 handle_page_fault:
464 stw r4,_DAR(r1)
465 addi r3,r1,STACK_FRAME_OVERHEAD
466 bl do_page_fault
467 cmpwi r3,0
468 beq+ ret_from_except
469 SAVE_NVGPRS(r1)
470 lwz r0,TRAP(r1)
471 clrrwi r0,r0,1
472 stw r0,TRAP(r1)
473 mr r5,r3
474 addi r3,r1,STACK_FRAME_OVERHEAD
475 lwz r4,_DAR(r1)
476 bl bad_page_fault
477 b ret_from_except_full
478
479 /*
480 * This routine switches between two different tasks. The process
481 * state of one is saved on its kernel stack. Then the state
482 * of the other is restored from its kernel stack. The memory
483 * management hardware is updated to the second process's state.
484 * Finally, we can return to the second process.
485 * On entry, r3 points to the THREAD for the current task, r4
486 * points to the THREAD for the new task.
487 *
488 * This routine is always called with interrupts disabled.
489 *
490 * Note: there are two ways to get to the "going out" portion
491 * of this code; either by coming in via the entry (_switch)
492 * or via "fork" which must set up an environment equivalent
493  * to the "_switch" path. If you change this, you'll have to
494 * change the fork code also.
495 *
496 * The code which creates the new task context is in 'copy_thread'
497 * in arch/ppc/kernel/process.c
498 */
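/*
 * Seen from C this is roughly (sketch; the real declaration lives with the
 * switch_to() glue, not here):
 *
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *next);
 *
 * It stacks the non-volatile state of 'prev', points SPRG3/r2/r1 at 'next',
 * and returns the previously-current task in r3 so that switch_to() can
 * supply 'last'.
 */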
499 _GLOBAL(_switch)
500 stwu r1,-INT_FRAME_SIZE(r1)
501 mflr r0
502 stw r0,INT_FRAME_SIZE+4(r1)
503 /* r3-r12 are caller saved -- Cort */
504 SAVE_NVGPRS(r1)
505 stw r0,_NIP(r1) /* Return to switch caller */
506 mfmsr r11
507 li r0,MSR_FP /* Disable floating-point */
508 #ifdef CONFIG_ALTIVEC
509 BEGIN_FTR_SECTION
510 oris r0,r0,MSR_VEC@h /* Disable altivec */
511 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
512 stw r12,THREAD+THREAD_VRSAVE(r2)
513 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
514 #endif /* CONFIG_ALTIVEC */
515 #ifdef CONFIG_SPE
516 oris r0,r0,MSR_SPE@h /* Disable SPE */
517 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
518 stw r12,THREAD+THREAD_SPEFSCR(r2)
519 #endif /* CONFIG_SPE */
520 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
521 beq+ 1f
522 andc r11,r11,r0
523 MTMSRD(r11)
524 isync
525 1: stw r11,_MSR(r1)
526 mfcr r10
527 stw r10,_CCR(r1)
528 stw r1,KSP(r3) /* Set old stack pointer */
529
530 #ifdef CONFIG_SMP
531 /* We need a sync somewhere here to make sure that if the
532 * previous task gets rescheduled on another CPU, it sees all
533 * stores it has performed on this one.
534 */
535 sync
536 #endif /* CONFIG_SMP */
537
538 tophys(r0,r4)
539 CLR_TOP32(r0)
540 mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
541 lwz r1,KSP(r4) /* Load new stack pointer */
542
543 /* save the old current 'last' for return value */
544 mr r3,r2
545 addi r2,r4,-THREAD /* Update current */
546
547 #ifdef CONFIG_ALTIVEC
548 BEGIN_FTR_SECTION
549 lwz r0,THREAD+THREAD_VRSAVE(r2)
550 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
551 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
552 #endif /* CONFIG_ALTIVEC */
553 #ifdef CONFIG_SPE
554 lwz r0,THREAD+THREAD_SPEFSCR(r2)
555 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
556 #endif /* CONFIG_SPE */
557
558 lwz r0,_CCR(r1)
559 mtcrf 0xFF,r0
560 /* r3-r12 are destroyed -- Cort */
561 REST_NVGPRS(r1)
562
563 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
564 mtlr r4
565 addi r1,r1,INT_FRAME_SIZE
566 blr
567
568 .globl fast_exception_return
569 fast_exception_return:
570 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
571 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
572 beq 1f /* if not, we've got problems */
573 #endif
574
575 2: REST_4GPRS(3, r11)
576 lwz r10,_CCR(r11)
577 REST_GPR(1, r11)
578 mtcr r10
579 lwz r10,_LINK(r11)
580 mtlr r10
581 REST_GPR(10, r11)
582 mtspr SPRN_SRR1,r9
583 mtspr SPRN_SRR0,r12
584 REST_GPR(9, r11)
585 REST_GPR(12, r11)
586 lwz r11,GPR11(r11)
587 SYNC
588 RFI
589
590 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
591 /* check if the exception happened in a restartable section */
592 1: lis r3,exc_exit_restart_end@ha
593 addi r3,r3,exc_exit_restart_end@l
594 cmplw r12,r3
595 bge 3f
596 lis r4,exc_exit_restart@ha
597 addi r4,r4,exc_exit_restart@l
598 cmplw r12,r4
599 blt 3f
600 lis r3,fee_restarts@ha
601 tophys(r3,r3)
602 lwz r5,fee_restarts@l(r3)
603 addi r5,r5,1
604 stw r5,fee_restarts@l(r3)
605 mr r12,r4 /* restart at exc_exit_restart */
606 b 2b
607
608 .comm fee_restarts,4
609
610 /* aargh, a nonrecoverable interrupt, panic */
611 /* aargh, we don't know which trap this is */
612 /* but the 601 doesn't implement the RI bit, so assume it's OK */
613 3:
614 BEGIN_FTR_SECTION
615 b 2b
616 END_FTR_SECTION_IFSET(CPU_FTR_601)
617 li r10,-1
618 stw r10,TRAP(r11)
619 addi r3,r1,STACK_FRAME_OVERHEAD
620 lis r10,MSR_KERNEL@h
621 ori r10,r10,MSR_KERNEL@l
622 bl transfer_to_handler_full
623 .long nonrecoverable_exception
624 .long ret_from_except
625 #endif
626
627 .globl sigreturn_exit
628 sigreturn_exit:
629 subi r1,r3,STACK_FRAME_OVERHEAD
630 rlwinm r12,r1,0,0,18 /* current_thread_info() */
631 lwz r9,TI_FLAGS(r12)
632 andi. r0,r9,_TIF_SYSCALL_T_OR_A
633 bnel- do_syscall_trace_leave
634 /* fall through */
635
636 .globl ret_from_except_full
637 ret_from_except_full:
638 REST_NVGPRS(r1)
639 /* fall through */
640
641 .globl ret_from_except
642 ret_from_except:
643 /* Hard-disable interrupts so that current_thread_info()->flags
644 * can't change between when we test it and when we return
645 * from the interrupt. */
646 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
647 SYNC /* Some chip revs have problems here... */
648 MTMSRD(r10) /* disable interrupts */
649
650 lwz r3,_MSR(r1) /* Returning to user mode? */
651 andi. r0,r3,MSR_PR
652 beq resume_kernel
653
654 user_exc_return: /* r10 contains MSR_KERNEL here */
655 /* Check current_thread_info()->flags */
656 rlwinm r9,r1,0,0,18
657 lwz r9,TI_FLAGS(r9)
658 andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
659 bne do_work
660
661 restore_user:
662 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
663 /* Check whether this process has its own DBCR0 value. The single
664 step bit tells us that dbcr0 should be loaded. */
665 lwz r0,THREAD+THREAD_DBCR0(r2)
666 andis. r10,r0,DBCR0_IC@h
667 bnel- load_dbcr0
668 #endif
669
670 #ifdef CONFIG_PREEMPT
671 b restore
672
673 /* N.B. the only way to get here is from the beq following ret_from_except. */
674 resume_kernel:
675 	/* check current_thread_info()->preempt_count */
676 rlwinm r9,r1,0,0,18
677 lwz r0,TI_PREEMPT(r9)
678 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
679 bne restore
680 lwz r0,TI_FLAGS(r9)
681 andi. r0,r0,_TIF_NEED_RESCHED
682 beq+ restore
683 andi. r0,r3,MSR_EE /* interrupts off? */
684 beq restore /* don't schedule if so */
685 1: bl preempt_schedule_irq
686 rlwinm r9,r1,0,0,18
687 lwz r3,TI_FLAGS(r9)
688 andi. r0,r3,_TIF_NEED_RESCHED
689 bne- 1b
690 #else
691 resume_kernel:
692 #endif /* CONFIG_PREEMPT */
693
694 /* interrupts are hard-disabled at this point */
695 restore:
696 lwz r0,GPR0(r1)
697 lwz r2,GPR2(r1)
698 REST_4GPRS(3, r1)
699 REST_2GPRS(7, r1)
700
701 lwz r10,_XER(r1)
702 lwz r11,_CTR(r1)
703 mtspr SPRN_XER,r10
704 mtctr r11
705
706 PPC405_ERR77(0,r1)
707 stwcx. r0,0,r1 /* to clear the reservation */
708
709 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
710 lwz r9,_MSR(r1)
711 andi. r10,r9,MSR_RI /* check if this exception occurred */
712 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
713
714 lwz r10,_CCR(r1)
715 lwz r11,_LINK(r1)
716 mtcrf 0xFF,r10
717 mtlr r11
718
719 /*
720 * Once we put values in SRR0 and SRR1, we are in a state
721 * where exceptions are not recoverable, since taking an
722 * exception will trash SRR0 and SRR1. Therefore we clear the
723 * MSR:RI bit to indicate this. If we do take an exception,
724 * we can't return to the point of the exception but we
725 * can restart the exception exit path at the label
726 * exc_exit_restart below. -- paulus
727 */
728 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
729 SYNC
730 MTMSRD(r10) /* clear the RI bit */
731 .globl exc_exit_restart
732 exc_exit_restart:
733 lwz r9,_MSR(r1)
734 lwz r12,_NIP(r1)
735 FIX_SRR1(r9,r10)
736 mtspr SPRN_SRR0,r12
737 mtspr SPRN_SRR1,r9
738 REST_4GPRS(9, r1)
739 lwz r1,GPR1(r1)
740 .globl exc_exit_restart_end
741 exc_exit_restart_end:
742 SYNC
743 RFI
744
745 #else /* !(CONFIG_4xx || CONFIG_BOOKE) */
746 /*
747 * This is a bit different on 4xx/Book-E because it doesn't have
748 * the RI bit in the MSR.
749 * The TLB miss handler checks if we have interrupted
750 * the exception exit path and restarts it if so
751 * (well maybe one day it will... :).
752 */
753 lwz r11,_LINK(r1)
754 mtlr r11
755 lwz r10,_CCR(r1)
756 mtcrf 0xff,r10
757 REST_2GPRS(9, r1)
758 .globl exc_exit_restart
759 exc_exit_restart:
760 lwz r11,_NIP(r1)
761 lwz r12,_MSR(r1)
762 exc_exit_start:
763 mtspr SPRN_SRR0,r11
764 mtspr SPRN_SRR1,r12
765 REST_2GPRS(11, r1)
766 lwz r1,GPR1(r1)
767 .globl exc_exit_restart_end
768 exc_exit_restart_end:
769 PPC405_ERR77_SYNC
770 rfi
771 b . /* prevent prefetch past rfi */
772
773 /*
774 * Returning from a critical interrupt in user mode doesn't need
775 * to be any different from a normal exception. For a critical
776 * interrupt in the kernel, we just return (without checking for
777 * preemption) since the interrupt may have happened at some crucial
778 * place (e.g. inside the TLB miss handler), and because we will be
779 * running with r1 pointing into critical_stack, not the current
780 * process's kernel stack (and therefore current_thread_info() will
781 * give the wrong answer).
782 * We have to restore various SPRs that may have been in use at the
783 * time of the critical interrupt.
784 *
785 */
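/*
 * Note that the sequence below restores DEAR/ESR and returns through
 * CSRR0/CSRR1 with rfci rather than SRR0/SRR1 with rfi: the critical
 * interrupt may have landed between an exception and the point where its
 * handler had read those SPRs, so they must look untouched afterwards.
 */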
786 .globl ret_from_crit_exc
787 ret_from_crit_exc:
788 REST_NVGPRS(r1)
789 lwz r3,_MSR(r1)
790 andi. r3,r3,MSR_PR
791 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
792 bne user_exc_return
793
794 lwz r0,GPR0(r1)
795 lwz r2,GPR2(r1)
796 REST_4GPRS(3, r1)
797 REST_2GPRS(7, r1)
798
799 lwz r10,_XER(r1)
800 lwz r11,_CTR(r1)
801 mtspr SPRN_XER,r10
802 mtctr r11
803
804 PPC405_ERR77(0,r1)
805 stwcx. r0,0,r1 /* to clear the reservation */
806
807 lwz r11,_LINK(r1)
808 mtlr r11
809 lwz r10,_CCR(r1)
810 mtcrf 0xff,r10
811 #ifdef CONFIG_40x
812 	/* avoid any possible TLB misses here by turning off MSR:DR; we
813 	 * assume the instructions here are mapped by a pinned TLB entry */
814 li r10,MSR_IR
815 mtmsr r10
816 isync
817 tophys(r1, r1)
818 #endif
819 lwz r9,_DEAR(r1)
820 lwz r10,_ESR(r1)
821 mtspr SPRN_DEAR,r9
822 mtspr SPRN_ESR,r10
823 lwz r11,_NIP(r1)
824 lwz r12,_MSR(r1)
825 mtspr SPRN_CSRR0,r11
826 mtspr SPRN_CSRR1,r12
827 lwz r9,GPR9(r1)
828 lwz r12,GPR12(r1)
829 lwz r10,GPR10(r1)
830 lwz r11,GPR11(r1)
831 lwz r1,GPR1(r1)
832 PPC405_ERR77_SYNC
833 rfci
834 b . /* prevent prefetch past rfci */
835
836 #ifdef CONFIG_BOOKE
837 /*
838 * Return from a machine check interrupt, similar to a critical
839 * interrupt.
840 */
841 .globl ret_from_mcheck_exc
842 ret_from_mcheck_exc:
843 REST_NVGPRS(r1)
844 lwz r3,_MSR(r1)
845 andi. r3,r3,MSR_PR
846 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
847 bne user_exc_return
848
849 lwz r0,GPR0(r1)
850 lwz r2,GPR2(r1)
851 REST_4GPRS(3, r1)
852 REST_2GPRS(7, r1)
853
854 lwz r10,_XER(r1)
855 lwz r11,_CTR(r1)
856 mtspr SPRN_XER,r10
857 mtctr r11
858
859 stwcx. r0,0,r1 /* to clear the reservation */
860
861 lwz r11,_LINK(r1)
862 mtlr r11
863 lwz r10,_CCR(r1)
864 mtcrf 0xff,r10
865 lwz r9,_DEAR(r1)
866 lwz r10,_ESR(r1)
867 mtspr SPRN_DEAR,r9
868 mtspr SPRN_ESR,r10
869 lwz r11,_NIP(r1)
870 lwz r12,_MSR(r1)
871 mtspr SPRN_MCSRR0,r11
872 mtspr SPRN_MCSRR1,r12
873 lwz r9,GPR9(r1)
874 lwz r12,GPR12(r1)
875 lwz r10,GPR10(r1)
876 lwz r11,GPR11(r1)
877 lwz r1,GPR1(r1)
878 RFMCI
879 #endif /* CONFIG_BOOKE */
880
881 /*
882 * Load the DBCR0 value for a task that is being ptraced,
883 * having first saved away the global DBCR0. Note that r0
884  * has the dbcr0 value to set on entry to this routine.
885 */
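/*
 * global_dbcr0 (declared below) is a two-word block: word 0 holds the saved
 * global DBCR0 value, word 1 a nesting count.  load_dbcr0 bumps the count;
 * the matching decrement and DBCR0 restore are in transfer_to_handler when
 * a ptraced task takes an exception.
 */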
886 load_dbcr0:
887 mfmsr r10 /* first disable debug exceptions */
888 rlwinm r10,r10,0,~MSR_DE
889 mtmsr r10
890 isync
891 mfspr r10,SPRN_DBCR0
892 lis r11,global_dbcr0@ha
893 addi r11,r11,global_dbcr0@l
894 stw r10,0(r11)
895 mtspr SPRN_DBCR0,r0
896 lwz r10,4(r11)
897 addi r10,r10,1
898 stw r10,4(r11)
899 li r11,-1
900 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
901 blr
902
903 .comm global_dbcr0,8
904 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
905
906 do_work: /* r10 contains MSR_KERNEL here */
907 andi. r0,r9,_TIF_NEED_RESCHED
908 beq do_user_signal
909
910 do_resched: /* r10 contains MSR_KERNEL here */
911 ori r10,r10,MSR_EE
912 SYNC
913 MTMSRD(r10) /* hard-enable interrupts */
914 bl schedule
915 recheck:
916 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
917 SYNC
918 MTMSRD(r10) /* disable interrupts */
919 rlwinm r9,r1,0,0,18
920 lwz r9,TI_FLAGS(r9)
921 andi. r0,r9,_TIF_NEED_RESCHED
922 bne- do_resched
923 andi. r0,r9,_TIF_SIGPENDING
924 beq restore_user
925 do_user_signal: /* r10 contains MSR_KERNEL here */
926 ori r10,r10,MSR_EE
927 SYNC
928 MTMSRD(r10) /* hard-enable interrupts */
929 /* save r13-r31 in the exception frame, if not already done */
930 lwz r3,TRAP(r1)
931 andi. r0,r3,1
932 beq 2f
933 SAVE_NVGPRS(r1)
934 rlwinm r3,r3,0,0,30
935 stw r3,TRAP(r1)
936 2: li r3,0
937 addi r4,r1,STACK_FRAME_OVERHEAD
938 bl do_signal
939 REST_NVGPRS(r1)
940 b recheck
941
942 /*
943 * We come here when we are at the end of handling an exception
944 * that occurred at a place where taking an exception will lose
945 * state information, such as the contents of SRR0 and SRR1.
946 */
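/*
 * Recovery test below, roughly (pseudo-C sketch):
 *
 *	if (exc_exit_restart <= nip && nip < exc_exit_restart_end) {
 *		ee_restarts++;
 *		nip = exc_exit_restart;		-- replay the exit sequence
 *	} else {
 *		nonrecoverable_exception(regs);	-- the "3:" branch, kills the process
 *	}
 */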
947 nonrecoverable:
948 lis r10,exc_exit_restart_end@ha
949 addi r10,r10,exc_exit_restart_end@l
950 cmplw r12,r10
951 bge 3f
952 lis r11,exc_exit_restart@ha
953 addi r11,r11,exc_exit_restart@l
954 cmplw r12,r11
955 blt 3f
956 lis r10,ee_restarts@ha
957 lwz r12,ee_restarts@l(r10)
958 addi r12,r12,1
959 stw r12,ee_restarts@l(r10)
960 mr r12,r11 /* restart at exc_exit_restart */
961 blr
962 3: /* OK, we can't recover, kill this process */
963 /* but the 601 doesn't implement the RI bit, so assume it's OK */
964 BEGIN_FTR_SECTION
965 blr
966 END_FTR_SECTION_IFSET(CPU_FTR_601)
967 lwz r3,TRAP(r1)
968 andi. r0,r3,1
969 beq 4f
970 SAVE_NVGPRS(r1)
971 rlwinm r3,r3,0,0,30
972 stw r3,TRAP(r1)
973 4: addi r3,r1,STACK_FRAME_OVERHEAD
974 bl nonrecoverable_exception
975 /* shouldn't return */
976 b 4b
977
978 .comm ee_restarts,4
979
980 /*
981 * PROM code for specific machines follows. Put it
982 * here so it's easy to add arch-specific sections later.
983 * -- Cort
984 */
985 #ifdef CONFIG_PPC_OF
986 /*
987 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
988 * called with the MMU off.
989 */
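/*
 * Sketch of the sequence below (register usage read off the code, not the
 * C prototype in rtas.c):
 *  - r3 from the caller is passed straight through to RTAS (presumably the
 *    physical address of the RTAS argument block).
 *  - SRR0/SRR1 get rtas_entry and MSR_KERNEL & ~(MSR_IR|MSR_DR), so the RFI
 *    drops into RTAS with translation off.
 *  - LR is pointed at the physical address of label 1:, where the kernel
 *    MSR is rebuilt and a second RFI returns to the caller.
 *  - SPRG2 carries the physical kernel stack pointer while RTAS runs and is
 *    cleared again on the way out.
 */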
990 _GLOBAL(enter_rtas)
991 stwu r1,-INT_FRAME_SIZE(r1)
992 mflr r0
993 stw r0,INT_FRAME_SIZE+4(r1)
994 lis r4,rtas_data@ha
995 lwz r4,rtas_data@l(r4)
996 lis r6,1f@ha /* physical return address for rtas */
997 addi r6,r6,1f@l
998 tophys(r6,r6)
999 tophys(r7,r1)
1000 lis r8,rtas_entry@ha
1001 lwz r8,rtas_entry@l(r8)
1002 mfmsr r9
1003 stw r9,8(r1)
1004 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1005 SYNC /* disable interrupts so SRR0/1 */
1006 MTMSRD(r0) /* don't get trashed */
1007 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1008 mtlr r6
1009 CLR_TOP32(r7)
1010 mtspr SPRN_SPRG2,r7
1011 mtspr SPRN_SRR0,r8
1012 mtspr SPRN_SRR1,r9
1013 RFI
1014 1: tophys(r9,r1)
1015 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
1016 lwz r9,8(r9) /* original msr value */
1017 FIX_SRR1(r9,r0)
1018 addi r1,r1,INT_FRAME_SIZE
1019 li r0,0
1020 mtspr SPRN_SPRG2,r0
1021 mtspr SPRN_SRR0,r8
1022 mtspr SPRN_SRR1,r9
1023 RFI /* return to caller */
1024
1025 .globl machine_check_in_rtas
1026 machine_check_in_rtas:
1027 twi 31,0,0
1028 /* XXX load up BATs and panic */
1029
1030 #endif /* CONFIG_PPC_OF */