powerpc: Fix a branch-too-far link error for 32-bit targets
deliverable/linux.git: arch/powerpc/kernel/entry_32.S
1 /*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22 #include <linux/config.h>
23 #include <linux/errno.h>
24 #include <linux/sys.h>
25 #include <linux/threads.h>
26 #include <asm/reg.h>
27 #include <asm/page.h>
28 #include <asm/mmu.h>
29 #include <asm/cputable.h>
30 #include <asm/thread_info.h>
31 #include <asm/ppc_asm.h>
32 #include <asm/asm-offsets.h>
33 #include <asm/unistd.h>
34
35 #undef SHOW_SYSCALLS
36 #undef SHOW_SYSCALLS_TASK
37
38 /*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
40 */
41 #if MSR_KERNEL >= 0x10000
42 #define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
43 #else
44 #define LOAD_MSR_KERNEL(r, x) li r,(x)
45 #endif
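/*
 * For illustration: LOAD_MSR_KERNEL(r10, MSR_KERNEL) expands to a single
 *	li	r10,MSR_KERNEL
 * when the value fits in a signed 16-bit immediate, and to the pair
 *	lis	r10,(MSR_KERNEL)@h
 *	ori	r10,r10,(MSR_KERNEL)@l
 * on 4xx/Book-E, where MSR_KERNEL also contains MSR_CE and so is >= 0x10000.
 */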
46
47 #ifdef CONFIG_BOOKE
48 #include "head_booke.h"
49 #define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
50 mtspr exc_level##_SPRG,r8; \
51 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
52 lwz r0,GPR10-INT_FRAME_SIZE(r8); \
53 stw r0,GPR10(r11); \
54 lwz r0,GPR11-INT_FRAME_SIZE(r8); \
55 stw r0,GPR11(r11); \
56 mfspr r8,exc_level##_SPRG
57
58 .globl mcheck_transfer_to_handler
59 mcheck_transfer_to_handler:
60 TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
61 b transfer_to_handler_full
62
63 .globl debug_transfer_to_handler
64 debug_transfer_to_handler:
65 TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
66 b transfer_to_handler_full
67
68 .globl crit_transfer_to_handler
69 crit_transfer_to_handler:
70 TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
71 /* fall through */
72 #endif
73
74 #ifdef CONFIG_40x
75 .globl crit_transfer_to_handler
76 crit_transfer_to_handler:
77 lwz r0,crit_r10@l(0)
78 stw r0,GPR10(r11)
79 lwz r0,crit_r11@l(0)
80 stw r0,GPR11(r11)
81 /* fall through */
82 #endif
83
84 /*
85 * This code finishes saving the registers to the exception frame
86 * and jumps to the appropriate handler for the exception, turning
87 * on address translation.
88 * Note that we rely on the caller having set cr0.eq iff the exception
89 * occurred in kernel mode (i.e. MSR:PR = 0).
90 */
91 .globl transfer_to_handler_full
92 transfer_to_handler_full:
93 SAVE_NVGPRS(r11)
94 /* fall through */
95
96 .globl transfer_to_handler
97 transfer_to_handler:
98 stw r2,GPR2(r11)
99 stw r12,_NIP(r11)
100 stw r9,_MSR(r11)
101 andi. r2,r9,MSR_PR
102 mfctr r12
103 mfspr r2,SPRN_XER
104 stw r12,_CTR(r11)
105 stw r2,_XER(r11)
106 mfspr r12,SPRN_SPRG3
107 addi r2,r12,-THREAD
108 tovirt(r2,r2) /* set r2 to current */
109 beq 2f /* if from user, fix up THREAD.regs */
110 addi r11,r1,STACK_FRAME_OVERHEAD
111 stw r11,PT_REGS(r12)
112 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
113 /* Check to see if the dbcr0 register is set up to debug. Use the
114 single-step bit to do this. */
115 lwz r12,THREAD_DBCR0(r12)
116 andis. r12,r12,DBCR0_IC@h
117 beq+ 3f
118 /* From user and task is ptraced - load up global dbcr0 */
119 li r12,-1 /* clear all pending debug events */
120 mtspr SPRN_DBSR,r12
121 lis r11,global_dbcr0@ha
122 tophys(r11,r11)
123 addi r11,r11,global_dbcr0@l
124 lwz r12,0(r11)
125 mtspr SPRN_DBCR0,r12
126 lwz r12,4(r11)
127 addi r12,r12,-1
128 stw r12,4(r11)
129 #endif
130 b 3f
131 2: /* if from kernel, check interrupted DOZE/NAP mode and
132 * check for stack overflow
133 */
134 #ifdef CONFIG_6xx
135 mfspr r11,SPRN_HID0
136 mtcr r11
137 BEGIN_FTR_SECTION
138 bt- 8,power_save_6xx_restore /* Check DOZE */
139 END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
140 BEGIN_FTR_SECTION
141 bt- 9,power_save_6xx_restore /* Check NAP */
142 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
143 #endif /* CONFIG_6xx */
144 .globl transfer_to_handler_cont
145 transfer_to_handler_cont:
146 lwz r11,THREAD_INFO-THREAD(r12)
147 cmplw r1,r11 /* if r1 <= current->thread_info */
148 ble- stack_ovf /* then the kernel stack overflowed */
149 3:
150 mflr r9
151 lwz r11,0(r9) /* virtual address of handler */
152 lwz r9,4(r9) /* where to go when done */
153 FIX_SRR1(r10,r12)
154 mtspr SPRN_SRR0,r11
155 mtspr SPRN_SRR1,r10
156 mtlr r9
157 SYNC
158 RFI /* jump to handler, enable MMU */
159
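/*
 * Callers reach transfer_to_handler with LR pointing at a two-word table
 * placed right after the branch: the first word is the virtual address of
 * the handler, the second the address to continue at when the handler
 * returns. The same convention is used later in this file, roughly:
 *
 *	bl	transfer_to_handler_full
 *	.long	nonrecoverable_exception
 *	.long	ret_from_except
 */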
160 /*
161 * On kernel stack overflow, load up an initial stack pointer
162 * and call StackOverflow(regs), which should not return.
163 */
164 stack_ovf:
165 /* sometimes we use a statically-allocated stack, which is OK. */
166 lis r11,_end@h
167 ori r11,r11,_end@l
168 cmplw r1,r11
169 ble 3b /* r1 <= &_end is OK */
170 SAVE_NVGPRS(r11)
171 addi r3,r1,STACK_FRAME_OVERHEAD
172 lis r1,init_thread_union@ha
173 addi r1,r1,init_thread_union@l
174 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
175 lis r9,StackOverflow@ha
176 addi r9,r9,StackOverflow@l
177 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
178 FIX_SRR1(r10,r12)
179 mtspr SPRN_SRR0,r9
180 mtspr SPRN_SRR1,r10
181 SYNC
182 RFI
183
184 /*
185 * Handle a system call.
186 */
187 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
188 .stabs "entry_32.S",N_SO,0,0,0f
189 0:
190
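/*
 * Calling convention assumed by the code below: r0 holds the system call
 * number, r3-r8 hold up to six arguments (the traced path restores exactly
 * GPR3-GPR8), and r1 points to the exception frame. The handler's result
 * comes back in r3; negative errno values are turned into a positive errno
 * with the SO bit set in CR0 on the exit path.
 */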
191 _GLOBAL(DoSyscall)
192 stw r0,THREAD+LAST_SYSCALL(r2)
193 stw r3,ORIG_GPR3(r1)
194 li r12,0
195 stw r12,RESULT(r1)
196 lwz r11,_CCR(r1) /* Clear SO bit in CR */
197 rlwinm r11,r11,0,4,2
198 stw r11,_CCR(r1)
199 #ifdef SHOW_SYSCALLS
200 bl do_show_syscall
201 #endif /* SHOW_SYSCALLS */
202 rlwinm r10,r1,0,0,18 /* current_thread_info() */
203 lwz r11,TI_LOCAL_FLAGS(r10)
204 rlwinm r11,r11,0,~_TIFL_FORCE_NOERROR
205 stw r11,TI_LOCAL_FLAGS(r10)
206 lwz r11,TI_FLAGS(r10)
207 andi. r11,r11,_TIF_SYSCALL_T_OR_A
208 bne- syscall_dotrace
209 syscall_dotrace_cont:
210 cmplwi 0,r0,NR_syscalls
211 lis r10,sys_call_table@h
212 ori r10,r10,sys_call_table@l
213 slwi r0,r0,2
214 bge- 66f
215 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
216 mtlr r10
217 addi r9,r1,STACK_FRAME_OVERHEAD
218 PPC440EP_ERR42
219 blrl /* Call handler */
220 .globl ret_from_syscall
221 ret_from_syscall:
222 #ifdef SHOW_SYSCALLS
223 bl do_show_syscall_exit
224 #endif
225 mr r6,r3
226 li r11,-_LAST_ERRNO
227 cmplw 0,r3,r11
228 rlwinm r12,r1,0,0,18 /* current_thread_info() */
229 blt+ 30f
230 lwz r11,TI_LOCAL_FLAGS(r12)
231 andi. r11,r11,_TIFL_FORCE_NOERROR
232 bne 30f
233 neg r3,r3
234 lwz r10,_CCR(r1) /* Set SO bit in CR */
235 oris r10,r10,0x1000
236 stw r10,_CCR(r1)
237
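/*
 * Seen from user space this is the usual 32-bit powerpc convention: on
 * error the sc instruction returns with CR0.SO set and the positive errno
 * in r3, and the C library negates it into errno. The _TIFL_FORCE_NOERROR
 * flag tested above exists so a system call can suppress this conversion
 * when a legitimate result would otherwise look like a negative errno.
 */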
238 /* disable interrupts so current_thread_info()->flags can't change */
239 30: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
240 SYNC
241 MTMSRD(r10)
242 lwz r9,TI_FLAGS(r12)
243 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
244 bne- syscall_exit_work
245 syscall_exit_cont:
246 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
247 /* If the process has its own DBCR0 value, load it up. The single
248 step bit tells us that dbcr0 should be loaded. */
249 lwz r0,THREAD+THREAD_DBCR0(r2)
250 andis. r10,r0,DBCR0_IC@h
251 bnel- load_dbcr0
252 #endif
253 stwcx. r0,0,r1 /* to clear the reservation */
254 lwz r4,_LINK(r1)
255 lwz r5,_CCR(r1)
256 mtlr r4
257 mtcr r5
258 lwz r7,_NIP(r1)
259 lwz r8,_MSR(r1)
260 FIX_SRR1(r8, r0)
261 lwz r2,GPR2(r1)
262 lwz r1,GPR1(r1)
263 mtspr SPRN_SRR0,r7
264 mtspr SPRN_SRR1,r8
265 SYNC
266 RFI
267
268 66: li r3,-ENOSYS
269 b ret_from_syscall
270
271 .globl ret_from_fork
272 ret_from_fork:
273 REST_NVGPRS(r1)
274 bl schedule_tail
275 li r3,0
276 b ret_from_syscall
277
278 /* Traced system call support */
279 syscall_dotrace:
280 SAVE_NVGPRS(r1)
281 li r0,0xc00
282 stw r0,TRAP(r1)
283 addi r3,r1,STACK_FRAME_OVERHEAD
284 bl do_syscall_trace_enter
285 lwz r0,GPR0(r1) /* Restore original registers */
286 lwz r3,GPR3(r1)
287 lwz r4,GPR4(r1)
288 lwz r5,GPR5(r1)
289 lwz r6,GPR6(r1)
290 lwz r7,GPR7(r1)
291 lwz r8,GPR8(r1)
292 REST_NVGPRS(r1)
293 b syscall_dotrace_cont
294
295 syscall_exit_work:
296 stw r6,RESULT(r1) /* Save result */
297 stw r3,GPR3(r1) /* Update return value */
298 andi. r0,r9,_TIF_SYSCALL_T_OR_A
299 beq 5f
300 ori r10,r10,MSR_EE
301 SYNC
302 MTMSRD(r10) /* re-enable interrupts */
303 lwz r4,TRAP(r1)
304 andi. r4,r4,1
305 beq 4f
306 SAVE_NVGPRS(r1)
307 li r4,0xc00
308 stw r4,TRAP(r1)
309 4:
310 addi r3,r1,STACK_FRAME_OVERHEAD
311 bl do_syscall_trace_leave
312 REST_NVGPRS(r1)
313 2:
314 lwz r3,GPR3(r1)
315 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
316 SYNC
317 MTMSRD(r10) /* disable interrupts again */
318 rlwinm r12,r1,0,0,18 /* current_thread_info() */
319 lwz r9,TI_FLAGS(r12)
320 5:
321 andi. r0,r9,_TIF_NEED_RESCHED
322 bne 1f
323 lwz r5,_MSR(r1)
324 andi. r5,r5,MSR_PR
325 beq syscall_exit_cont
326 andi. r0,r9,_TIF_SIGPENDING
327 beq syscall_exit_cont
328 b do_user_signal
329 1:
330 ori r10,r10,MSR_EE
331 SYNC
332 MTMSRD(r10) /* re-enable interrupts */
333 bl schedule
334 b 2b
335
336 #ifdef SHOW_SYSCALLS
337 do_show_syscall:
338 #ifdef SHOW_SYSCALLS_TASK
339 lis r11,show_syscalls_task@ha
340 lwz r11,show_syscalls_task@l(r11)
341 cmp 0,r2,r11
342 bnelr
343 #endif
344 stw r31,GPR31(r1)
345 mflr r31
346 lis r3,7f@ha
347 addi r3,r3,7f@l
348 lwz r4,GPR0(r1)
349 lwz r5,GPR3(r1)
350 lwz r6,GPR4(r1)
351 lwz r7,GPR5(r1)
352 lwz r8,GPR6(r1)
353 lwz r9,GPR7(r1)
354 bl printk
355 lis r3,77f@ha
356 addi r3,r3,77f@l
357 lwz r4,GPR8(r1)
358 mr r5,r2
359 bl printk
360 lwz r0,GPR0(r1)
361 lwz r3,GPR3(r1)
362 lwz r4,GPR4(r1)
363 lwz r5,GPR5(r1)
364 lwz r6,GPR6(r1)
365 lwz r7,GPR7(r1)
366 lwz r8,GPR8(r1)
367 mtlr r31
368 lwz r31,GPR31(r1)
369 blr
370
371 do_show_syscall_exit:
372 #ifdef SHOW_SYSCALLS_TASK
373 lis r11,show_syscalls_task@ha
374 lwz r11,show_syscalls_task@l(r11)
375 cmp 0,r2,r11
376 bnelr
377 #endif
378 stw r31,GPR31(r1)
379 mflr r31
380 stw r3,RESULT(r1) /* Save result */
381 mr r4,r3
382 lis r3,79f@ha
383 addi r3,r3,79f@l
384 bl printk
385 lwz r3,RESULT(r1)
386 mtlr r31
387 lwz r31,GPR31(r1)
388 blr
389
390 7: .string "syscall %d(%x, %x, %x, %x, %x, "
391 77: .string "%x), current=%p\n"
392 79: .string " -> %x\n"
393 .align 2,0
394
395 #ifdef SHOW_SYSCALLS_TASK
396 .data
397 .globl show_syscalls_task
398 show_syscalls_task:
399 .long -1
400 .text
401 #endif
402 #endif /* SHOW_SYSCALLS */
403
404 /*
405 * The sigsuspend and rt_sigsuspend system calls can call do_signal
406 * and thus put the process into the stopped state where we might
407 * want to examine its user state with ptrace. Therefore we need
408 * to save all the nonvolatile registers (r13 - r31) before calling
409 * the C code.
410 */
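/*
 * The low bit of the TRAP word records how much state the frame holds:
 * 1 means only the volatile registers were saved, 0 means the full set
 * including r13-r31 is present. Clearing it here, and in the wrappers
 * below, tells the signal and ptrace code that the non-volatile values
 * in the frame are valid.
 */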
411 .globl ppc_sigsuspend
412 ppc_sigsuspend:
413 SAVE_NVGPRS(r1)
414 lwz r0,TRAP(r1)
415 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
416 stw r0,TRAP(r1) /* register set saved */
417 b sys_sigsuspend
418
419 .globl ppc_rt_sigsuspend
420 ppc_rt_sigsuspend:
421 SAVE_NVGPRS(r1)
422 lwz r0,TRAP(r1)
423 rlwinm r0,r0,0,0,30
424 stw r0,TRAP(r1)
425 b sys_rt_sigsuspend
426
427 .globl ppc_fork
428 ppc_fork:
429 SAVE_NVGPRS(r1)
430 lwz r0,TRAP(r1)
431 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
432 stw r0,TRAP(r1) /* register set saved */
433 b sys_fork
434
435 .globl ppc_vfork
436 ppc_vfork:
437 SAVE_NVGPRS(r1)
438 lwz r0,TRAP(r1)
439 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
440 stw r0,TRAP(r1) /* register set saved */
441 b sys_vfork
442
443 .globl ppc_clone
444 ppc_clone:
445 SAVE_NVGPRS(r1)
446 lwz r0,TRAP(r1)
447 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
448 stw r0,TRAP(r1) /* register set saved */
449 b sys_clone
450
451 .globl ppc_swapcontext
452 ppc_swapcontext:
453 SAVE_NVGPRS(r1)
454 lwz r0,TRAP(r1)
455 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
456 stw r0,TRAP(r1) /* register set saved */
457 b sys_swapcontext
458
459 /*
460 * Top-level page fault handling.
461 * This is in assembler because if do_page_fault tells us that
462 * it is a bad kernel page fault, we want to save the non-volatile
463 * registers before calling bad_page_fault.
464 */
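/*
 * do_page_fault() returns 0 when the fault was handled; a non-zero return
 * is treated as the signal number for a bad kernel fault and is passed to
 * bad_page_fault() as its third argument, together with the faulting
 * address saved in _DAR below.
 */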
465 .globl handle_page_fault
466 handle_page_fault:
467 stw r4,_DAR(r1)
468 addi r3,r1,STACK_FRAME_OVERHEAD
469 bl do_page_fault
470 cmpwi r3,0
471 beq+ ret_from_except
472 SAVE_NVGPRS(r1)
473 lwz r0,TRAP(r1)
474 clrrwi r0,r0,1
475 stw r0,TRAP(r1)
476 mr r5,r3
477 addi r3,r1,STACK_FRAME_OVERHEAD
478 lwz r4,_DAR(r1)
479 bl bad_page_fault
480 b ret_from_except_full
481
482 /*
483 * This routine switches between two different tasks. The process
484 * state of one is saved on its kernel stack. Then the state
485 * of the other is restored from its kernel stack. The memory
486 * management hardware is updated to the second process's state.
487 * Finally, we can return to the second process.
488 * On entry, r3 points to the THREAD for the current task, r4
489 * points to the THREAD for the new task.
490 *
491 * This routine is always called with interrupts disabled.
492 *
493 * Note: there are two ways to get to the "going out" portion
494 * of this code; either by coming in via the entry (_switch)
495 * or via "fork" which must set up an environment equivalent
496 * to the "_switch" path. If you change this, you'll have to
497 * change the fork code also.
498 *
499 * The code which creates the new task context is in 'copy_thread'
500 * in arch/ppc/kernel/process.c
501 */
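/*
 * A sketch of how this looks from C (the prototype is an assumption, not
 * taken from this file): last = _switch(&prev->thread, &new->thread).
 * r3 and r4 carry the two THREAD pointers, and r3 returns the previously
 * current task, which is why r2 (current) is copied into r3 before the
 * stacks are switched.
 */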
502 _GLOBAL(_switch)
503 stwu r1,-INT_FRAME_SIZE(r1)
504 mflr r0
505 stw r0,INT_FRAME_SIZE+4(r1)
506 /* r3-r12 are caller saved -- Cort */
507 SAVE_NVGPRS(r1)
508 stw r0,_NIP(r1) /* Return to switch caller */
509 mfmsr r11
510 li r0,MSR_FP /* Disable floating-point */
511 #ifdef CONFIG_ALTIVEC
512 BEGIN_FTR_SECTION
513 oris r0,r0,MSR_VEC@h /* Disable altivec */
514 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
515 stw r12,THREAD+THREAD_VRSAVE(r2)
516 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
517 #endif /* CONFIG_ALTIVEC */
518 #ifdef CONFIG_SPE
519 oris r0,r0,MSR_SPE@h /* Disable SPE */
520 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
521 stw r12,THREAD+THREAD_SPEFSCR(r2)
522 #endif /* CONFIG_SPE */
523 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
524 beq+ 1f
525 andc r11,r11,r0
526 MTMSRD(r11)
527 isync
528 1: stw r11,_MSR(r1)
529 mfcr r10
530 stw r10,_CCR(r1)
531 stw r1,KSP(r3) /* Set old stack pointer */
532
533 #ifdef CONFIG_SMP
534 /* We need a sync somewhere here to make sure that if the
535 * previous task gets rescheduled on another CPU, it sees all
536 * stores it has performed on this one.
537 */
538 sync
539 #endif /* CONFIG_SMP */
540
541 tophys(r0,r4)
542 CLR_TOP32(r0)
543 mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
544 lwz r1,KSP(r4) /* Load new stack pointer */
545
546 /* save the old current 'last' for return value */
547 mr r3,r2
548 addi r2,r4,-THREAD /* Update current */
549
550 #ifdef CONFIG_ALTIVEC
551 BEGIN_FTR_SECTION
552 lwz r0,THREAD+THREAD_VRSAVE(r2)
553 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
554 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
555 #endif /* CONFIG_ALTIVEC */
556 #ifdef CONFIG_SPE
557 lwz r0,THREAD+THREAD_SPEFSCR(r2)
558 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
559 #endif /* CONFIG_SPE */
560
561 lwz r0,_CCR(r1)
562 mtcrf 0xFF,r0
563 /* r3-r12 are destroyed -- Cort */
564 REST_NVGPRS(r1)
565
566 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
567 mtlr r4
568 addi r1,r1,INT_FRAME_SIZE
569 blr
570
571 .globl fast_exception_return
572 fast_exception_return:
573 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
574 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
575 beq 1f /* if not, we've got problems */
576 #endif
577
578 2: REST_4GPRS(3, r11)
579 lwz r10,_CCR(r11)
580 REST_GPR(1, r11)
581 mtcr r10
582 lwz r10,_LINK(r11)
583 mtlr r10
584 REST_GPR(10, r11)
585 mtspr SPRN_SRR1,r9
586 mtspr SPRN_SRR0,r12
587 REST_GPR(9, r11)
588 REST_GPR(12, r11)
589 lwz r11,GPR11(r11)
590 SYNC
591 RFI
592
593 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
594 /* check if the exception happened in a restartable section */
595 1: lis r3,exc_exit_restart_end@ha
596 addi r3,r3,exc_exit_restart_end@l
597 cmplw r12,r3
598 bge 3f
599 lis r4,exc_exit_restart@ha
600 addi r4,r4,exc_exit_restart@l
601 cmplw r12,r4
602 blt 3f
603 lis r3,fee_restarts@ha
604 tophys(r3,r3)
605 lwz r5,fee_restarts@l(r3)
606 addi r5,r5,1
607 stw r5,fee_restarts@l(r3)
608 mr r12,r4 /* restart at exc_exit_restart */
609 b 2b
610
611 .comm fee_restarts,4
612
613 /* aargh, a nonrecoverable interrupt, panic */
614 /* aargh, we don't know which trap this is */
615 /* but the 601 doesn't implement the RI bit, so assume it's OK */
616 3:
617 BEGIN_FTR_SECTION
618 b 2b
619 END_FTR_SECTION_IFSET(CPU_FTR_601)
620 li r10,-1
621 stw r10,TRAP(r11)
622 addi r3,r1,STACK_FRAME_OVERHEAD
623 lis r10,MSR_KERNEL@h
624 ori r10,r10,MSR_KERNEL@l
625 bl transfer_to_handler_full
626 .long nonrecoverable_exception
627 .long ret_from_except
628 #endif
629
630 .globl sigreturn_exit
631 sigreturn_exit:
632 subi r1,r3,STACK_FRAME_OVERHEAD
633 rlwinm r12,r1,0,0,18 /* current_thread_info() */
634 lwz r9,TI_FLAGS(r12)
635 andi. r0,r9,_TIF_SYSCALL_T_OR_A
636 beq+ ret_from_except_full
637 bl do_syscall_trace_leave
638 /* fall through */
639
640 .globl ret_from_except_full
641 ret_from_except_full:
642 REST_NVGPRS(r1)
643 /* fall through */
644
645 .globl ret_from_except
646 ret_from_except:
647 /* Hard-disable interrupts so that current_thread_info()->flags
648 * can't change between when we test it and when we return
649 * from the interrupt. */
650 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
651 SYNC /* Some chip revs have problems here... */
652 MTMSRD(r10) /* disable interrupts */
653
654 lwz r3,_MSR(r1) /* Returning to user mode? */
655 andi. r0,r3,MSR_PR
656 beq resume_kernel
657
658 user_exc_return: /* r10 contains MSR_KERNEL here */
659 /* Check current_thread_info()->flags */
660 rlwinm r9,r1,0,0,18
661 lwz r9,TI_FLAGS(r9)
662 andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
663 bne do_work
664
665 restore_user:
666 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
667 /* Check whether this process has its own DBCR0 value. The single
668 step bit tells us that dbcr0 should be loaded. */
669 lwz r0,THREAD+THREAD_DBCR0(r2)
670 andis. r10,r0,DBCR0_IC@h
671 bnel- load_dbcr0
672 #endif
673
674 #ifdef CONFIG_PREEMPT
675 b restore
676
677 /* N.B. the only way to get here is from the beq following ret_from_except. */
678 resume_kernel:
679 /* check current_thread_info->preempt_count */
680 rlwinm r9,r1,0,0,18
681 lwz r0,TI_PREEMPT(r9)
682 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
683 bne restore
684 lwz r0,TI_FLAGS(r9)
685 andi. r0,r0,_TIF_NEED_RESCHED
686 beq+ restore
687 andi. r0,r3,MSR_EE /* interrupts off? */
688 beq restore /* don't schedule if so */
689 1: bl preempt_schedule_irq
690 rlwinm r9,r1,0,0,18
691 lwz r3,TI_FLAGS(r9)
692 andi. r0,r3,_TIF_NEED_RESCHED
693 bne- 1b
694 #else
695 resume_kernel:
696 #endif /* CONFIG_PREEMPT */
697
698 /* interrupts are hard-disabled at this point */
699 restore:
700 lwz r0,GPR0(r1)
701 lwz r2,GPR2(r1)
702 REST_4GPRS(3, r1)
703 REST_2GPRS(7, r1)
704
705 lwz r10,_XER(r1)
706 lwz r11,_CTR(r1)
707 mtspr SPRN_XER,r10
708 mtctr r11
709
710 PPC405_ERR77(0,r1)
711 stwcx. r0,0,r1 /* to clear the reservation */
712
713 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
714 lwz r9,_MSR(r1)
715 andi. r10,r9,MSR_RI /* check if this exception occurred */
716 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
717
718 lwz r10,_CCR(r1)
719 lwz r11,_LINK(r1)
720 mtcrf 0xFF,r10
721 mtlr r11
722
723 /*
724 * Once we put values in SRR0 and SRR1, we are in a state
725 * where exceptions are not recoverable, since taking an
726 * exception will trash SRR0 and SRR1. Therefore we clear the
727 * MSR:RI bit to indicate this. If we do take an exception,
728 * we can't return to the point of the exception but we
729 * can restart the exception exit path at the label
730 * exc_exit_restart below. -- paulus
731 */
732 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
733 SYNC
734 MTMSRD(r10) /* clear the RI bit */
735 .globl exc_exit_restart
736 exc_exit_restart:
737 lwz r9,_MSR(r1)
738 lwz r12,_NIP(r1)
739 FIX_SRR1(r9,r10)
740 mtspr SPRN_SRR0,r12
741 mtspr SPRN_SRR1,r9
742 REST_4GPRS(9, r1)
743 lwz r1,GPR1(r1)
744 .globl exc_exit_restart_end
745 exc_exit_restart_end:
746 SYNC
747 RFI
748
749 #else /* !(CONFIG_4xx || CONFIG_BOOKE) */
750 /*
751 * This is a bit different on 4xx/Book-E because it doesn't have
752 * the RI bit in the MSR.
753 * The TLB miss handler checks if we have interrupted
754 * the exception exit path and restarts it if so
755 * (well maybe one day it will... :).
756 */
757 lwz r11,_LINK(r1)
758 mtlr r11
759 lwz r10,_CCR(r1)
760 mtcrf 0xff,r10
761 REST_2GPRS(9, r1)
762 .globl exc_exit_restart
763 exc_exit_restart:
764 lwz r11,_NIP(r1)
765 lwz r12,_MSR(r1)
766 exc_exit_start:
767 mtspr SPRN_SRR0,r11
768 mtspr SPRN_SRR1,r12
769 REST_2GPRS(11, r1)
770 lwz r1,GPR1(r1)
771 .globl exc_exit_restart_end
772 exc_exit_restart_end:
773 PPC405_ERR77_SYNC
774 rfi
775 b . /* prevent prefetch past rfi */
776
777 /*
778 * Returning from a critical interrupt in user mode doesn't need
779 * to be any different from a normal exception. For a critical
780 * interrupt in the kernel, we just return (without checking for
781 * preemption) since the interrupt may have happened at some crucial
782 * place (e.g. inside the TLB miss handler), and because we will be
783 * running with r1 pointing into critical_stack, not the current
784 * process's kernel stack (and therefore current_thread_info() will
785 * give the wrong answer).
786 * We have to restore various SPRs that may have been in use at the
787 * time of the critical interrupt.
788 *
789 */
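/*
 * RET_FROM_EXC_LEVEL below is instantiated once per exception level:
 * critical (CSRR0/CSRR1, RFCI), and on Book-E also debug (DSRR0/DSRR1,
 * RFDI) and machine check (MCSRR0/MCSRR1, RFMCI), each returning through
 * its own save/restore SPR pair and rfi variant.
 */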
790 #ifdef CONFIG_40x
791 #define PPC_40x_TURN_OFF_MSR_DR \
792 /* avoid any possible TLB misses here by turning off MSR.DR, we \
793 * assume the instructions here are mapped by a pinned TLB entry */ \
794 li r10,MSR_IR; \
795 mtmsr r10; \
796 isync; \
797 tophys(r1, r1);
798 #else
799 #define PPC_40x_TURN_OFF_MSR_DR
800 #endif
801
802 #define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
803 REST_NVGPRS(r1); \
804 lwz r3,_MSR(r1); \
805 andi. r3,r3,MSR_PR; \
806 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
807 bne user_exc_return; \
808 lwz r0,GPR0(r1); \
809 lwz r2,GPR2(r1); \
810 REST_4GPRS(3, r1); \
811 REST_2GPRS(7, r1); \
812 lwz r10,_XER(r1); \
813 lwz r11,_CTR(r1); \
814 mtspr SPRN_XER,r10; \
815 mtctr r11; \
816 PPC405_ERR77(0,r1); \
817 stwcx. r0,0,r1; /* to clear the reservation */ \
818 lwz r11,_LINK(r1); \
819 mtlr r11; \
820 lwz r10,_CCR(r1); \
821 mtcrf 0xff,r10; \
822 PPC_40x_TURN_OFF_MSR_DR; \
823 lwz r9,_DEAR(r1); \
824 lwz r10,_ESR(r1); \
825 mtspr SPRN_DEAR,r9; \
826 mtspr SPRN_ESR,r10; \
827 lwz r11,_NIP(r1); \
828 lwz r12,_MSR(r1); \
829 mtspr exc_lvl_srr0,r11; \
830 mtspr exc_lvl_srr1,r12; \
831 lwz r9,GPR9(r1); \
832 lwz r12,GPR12(r1); \
833 lwz r10,GPR10(r1); \
834 lwz r11,GPR11(r1); \
835 lwz r1,GPR1(r1); \
836 PPC405_ERR77_SYNC; \
837 exc_lvl_rfi; \
838 b .; /* prevent prefetch past exc_lvl_rfi */
839
840 .globl ret_from_crit_exc
841 ret_from_crit_exc:
842 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
843
844 #ifdef CONFIG_BOOKE
845 .globl ret_from_debug_exc
846 ret_from_debug_exc:
847 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
848
849 .globl ret_from_mcheck_exc
850 ret_from_mcheck_exc:
851 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
852 #endif /* CONFIG_BOOKE */
853
854 /*
855 * Load the DBCR0 value for a task that is being ptraced,
856 * having first saved away the global DBCR0. Note that r0
857 * has the dbcr0 value to set upon entry to this.
858 */
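/*
 * global_dbcr0 (the .comm below) is two words: the first holds the saved
 * system-wide DBCR0 value, the second a count of how many times a per-task
 * value has been installed. load_dbcr0 saves the current DBCR0 and bumps
 * the count; the exception entry path in transfer_to_handler puts the
 * saved value back and decrements the count, so the kernel itself does not
 * run with the task's debug settings.
 */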
859 load_dbcr0:
860 mfmsr r10 /* first disable debug exceptions */
861 rlwinm r10,r10,0,~MSR_DE
862 mtmsr r10
863 isync
864 mfspr r10,SPRN_DBCR0
865 lis r11,global_dbcr0@ha
866 addi r11,r11,global_dbcr0@l
867 stw r10,0(r11)
868 mtspr SPRN_DBCR0,r0
869 lwz r10,4(r11)
870 addi r10,r10,1
871 stw r10,4(r11)
872 li r11,-1
873 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
874 blr
875
876 .comm global_dbcr0,8
877 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
878
879 do_work: /* r10 contains MSR_KERNEL here */
880 andi. r0,r9,_TIF_NEED_RESCHED
881 beq do_user_signal
882
883 do_resched: /* r10 contains MSR_KERNEL here */
884 ori r10,r10,MSR_EE
885 SYNC
886 MTMSRD(r10) /* hard-enable interrupts */
887 bl schedule
888 recheck:
889 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
890 SYNC
891 MTMSRD(r10) /* disable interrupts */
892 rlwinm r9,r1,0,0,18
893 lwz r9,TI_FLAGS(r9)
894 andi. r0,r9,_TIF_NEED_RESCHED
895 bne- do_resched
896 andi. r0,r9,_TIF_SIGPENDING
897 beq restore_user
898 do_user_signal: /* r10 contains MSR_KERNEL here */
899 ori r10,r10,MSR_EE
900 SYNC
901 MTMSRD(r10) /* hard-enable interrupts */
902 /* save r13-r31 in the exception frame, if not already done */
903 lwz r3,TRAP(r1)
904 andi. r0,r3,1
905 beq 2f
906 SAVE_NVGPRS(r1)
907 rlwinm r3,r3,0,0,30
908 stw r3,TRAP(r1)
909 2: li r3,0
910 addi r4,r1,STACK_FRAME_OVERHEAD
911 bl do_signal
912 REST_NVGPRS(r1)
913 b recheck
914
915 /*
916 * We come here when we are at the end of handling an exception
917 * that occurred at a place where taking an exception will lose
918 * state information, such as the contents of SRR0 and SRR1.
919 */
920 nonrecoverable:
921 lis r10,exc_exit_restart_end@ha
922 addi r10,r10,exc_exit_restart_end@l
923 cmplw r12,r10
924 bge 3f
925 lis r11,exc_exit_restart@ha
926 addi r11,r11,exc_exit_restart@l
927 cmplw r12,r11
928 blt 3f
929 lis r10,ee_restarts@ha
930 lwz r12,ee_restarts@l(r10)
931 addi r12,r12,1
932 stw r12,ee_restarts@l(r10)
933 mr r12,r11 /* restart at exc_exit_restart */
934 blr
935 3: /* OK, we can't recover, kill this process */
936 /* but the 601 doesn't implement the RI bit, so assume it's OK */
937 BEGIN_FTR_SECTION
938 blr
939 END_FTR_SECTION_IFSET(CPU_FTR_601)
940 lwz r3,TRAP(r1)
941 andi. r0,r3,1
942 beq 4f
943 SAVE_NVGPRS(r1)
944 rlwinm r3,r3,0,0,30
945 stw r3,TRAP(r1)
946 4: addi r3,r1,STACK_FRAME_OVERHEAD
947 bl nonrecoverable_exception
948 /* shouldn't return */
949 b 4b
950
951 .comm ee_restarts,4
952
953 /*
954 * PROM code for specific machines follows. Put it
955 * here so it's easy to add arch-specific sections later.
956 * -- Cort
957 */
958 #ifdef CONFIG_PPC_OF
959 /*
960 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
961 * called with the MMU off.
962 */
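/*
 * r3 is passed through to RTAS unchanged and is assumed to hold the
 * physical address of the rtas_args block prepared by the caller; this
 * routine only saves state, switches to the real-mode entry point taken
 * from rtas_entry, and switches back on return.
 */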
963 _GLOBAL(enter_rtas)
964 stwu r1,-INT_FRAME_SIZE(r1)
965 mflr r0
966 stw r0,INT_FRAME_SIZE+4(r1)
967 lis r4,rtas_data@ha
968 lwz r4,rtas_data@l(r4)
969 lis r6,1f@ha /* physical return address for rtas */
970 addi r6,r6,1f@l
971 tophys(r6,r6)
972 tophys(r7,r1)
973 lis r8,rtas_entry@ha
974 lwz r8,rtas_entry@l(r8)
975 mfmsr r9
976 stw r9,8(r1)
977 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
978 SYNC /* disable interrupts so SRR0/1 */
979 MTMSRD(r0) /* don't get trashed */
980 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
981 mtlr r6
982 CLR_TOP32(r7)
983 mtspr SPRN_SPRG2,r7
984 mtspr SPRN_SRR0,r8
985 mtspr SPRN_SRR1,r9
986 RFI
987 1: tophys(r9,r1)
988 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
989 lwz r9,8(r9) /* original msr value */
990 FIX_SRR1(r9,r0)
991 addi r1,r1,INT_FRAME_SIZE
992 li r0,0
993 mtspr SPRN_SPRG2,r0
994 mtspr SPRN_SRR0,r8
995 mtspr SPRN_SRR1,r9
996 RFI /* return to caller */
997
998 .globl machine_check_in_rtas
999 machine_check_in_rtas:
1000 twi 31,0,0
1001 /* XXX load up BATs and panic */
1002
1003 #endif /* CONFIG_PPC_OF */