/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE,
 * so it cannot be loaded with a single li (signed 16-bit immediate).
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
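
/*
 * Illustrative expansion only (the value is hypothetical, not taken
 * from this tree): with MSR_KERNEL == 0x00029000,
 * LOAD_MSR_KERNEL(r10,MSR_KERNEL) would assemble as
 *	lis	r10,0x0002 ; ori	r10,r10,0x9000
 * while a small MSR_KERNEL is loaded with a single
 *	li	r10,MSR_KERNEL
 */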

#ifdef CONFIG_BOOKE
#include "head_booke.h"
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mtspr	MCHECK_SPRG,r8
	BOOKE_LOAD_MCHECK_STACK
	lwz	r0,GPR10-INT_FRAME_SIZE(r8)
	stw	r0,GPR10(r11)
	lwz	r0,GPR11-INT_FRAME_SIZE(r8)
	stw	r0,GPR11(r11)
	mfspr	r8,MCHECK_SPRG
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	mtspr	CRIT_SPRG,r8
	BOOKE_LOAD_CRIT_STACK
	lwz	r0,GPR10-INT_FRAME_SIZE(r8)
	stw	r0,GPR10(r11)
	lwz	r0,GPR11-INT_FRAME_SIZE(r8)
	stw	r0,GPR11(r11)
	mfspr	r8,CRIT_SPRG
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
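/*
 * Register usage at this point, as the code below uses it: r9 holds
 * the saved MSR, r11 points to the exception frame, r12 holds the
 * saved NIP, and r10 carries the MSR value the handler will run
 * with.  The caller's LR points at a pair of words: the (virtual)
 * address of the handler, followed by the address to return to once
 * the handler is done.
 */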
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f
2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl	transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r11,_end@h
	ori	r11,r11,_end@l
	cmplw	r1,r11
	ble	3b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
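/*
 * Calling convention, as the code below assumes it: the system call
 * number arrives in r0, the arguments in r3-r8, and the result is
 * returned in r3.  In effect (illustrative C only, not actual kernel
 * code):
 *
 *	if (r0 < NR_syscalls)
 *		r3 = sys_call_table[r0](r3, r4, r5, r6, r7, r8);
 *	else
 *		r3 = -ENOSYS;
 */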
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)		/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,18		/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r10)
	rlwinm	r11,r11,0,~_TIFL_FORCE_NOERROR
	stw	r11,TI_LOCAL_FLAGS(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_TRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0		/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl				/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
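	/*
	 * Error return convention, as implemented just below: a result
	 * in the range [-_LAST_ERRNO, -1] is treated as an error unless
	 * the _TIFL_FORCE_NOERROR local flag is set; errors are reported
	 * to user space by negating the value and setting the
	 * summary-overflow (SO) bit in the saved CR.
	 */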
	mr	r6,r3
	li	r11,-_LAST_ERRNO
	cmplw	0,r3,r11
	rlwinm	r12,r1,0,0,18		/* current_thread_info() */
	blt+	30f
	lwz	r11,TI_LOCAL_FLAGS(r12)
	andi.	r11,r11,_TIFL_FORCE_NOERROR
	bne	30f
	neg	r3,r3
	lwz	r10,_CCR(r1)		/* Set SO bit in CR */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)

	/* disable interrupts so current_thread_info()->flags can't change */
30:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_TRACE|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,TRAP(r1)
	bl	do_syscall_trace
	lwz	r0,GPR0(r1)		/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	stw	r6,RESULT(r1)		/* Save result */
	stw	r3,GPR3(r1)		/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	beq	5f
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)			/* re-enable interrupts */
	lwz	r4,TRAP(r1)
	andi.	r4,r4,1
	beq	4f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,TRAP(r1)
4:
	bl	do_syscall_trace
	REST_NVGPRS(r1)
2:
	lwz	r3,GPR3(r1)
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)			/* disable interrupts again */
	rlwinm	r12,r1,0,0,18		/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
5:
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne	1f
	lwz	r5,_MSR(r1)
	andi.	r5,r5,MSR_PR
	beq	syscall_exit_cont
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	b	do_user_signal
1:
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)			/* re-enable interrupts */
	bl	schedule
	b	2b

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)		/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.
 */
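/*
 * Bit 0 of the saved TRAP value acts as a flag here and in
 * do_user_signal below: while it is set, only the volatile registers
 * are in the frame; clearing it records that the full register set,
 * including r13-r31, has been saved.
 */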
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_sigsuspend

	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30
	stw	r0,TRAP(r1)
	b	sys_rt_sigsuspend

	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
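/*
 * As the code below uses it: r4 holds the faulting address on entry
 * (saved into _DAR); do_page_fault returns 0 when the fault has been
 * handled and a non-zero code otherwise, which is passed on to
 * bad_page_fault together with the faulting address.
 */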
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork", which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c.
 */
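/*
 * Illustrative C-level sketch only (the caller lives in
 * arch/ppc/kernel/process.c; the names below are for illustration
 * and not taken from that file):
 *
 *	last = _switch(&prev->thread, &next->thread);
 *
 * The old task's state is saved on its kernel stack and its stack
 * pointer recorded in THREAD.KSP; SPRG3, r2 and r1 are switched to
 * the new task; r3 carries back the previously-running task as the
 * return value.
 */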
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)		/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP		/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h		/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE		/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h		/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11		/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)		/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0		/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)		/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD		/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)		/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

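/*
 * Quick return path, presumably used by exception code that has fixed
 * things up without needing to call C.  Register usage mirrors
 * transfer_to_handler: r9 = saved MSR, r11 = exception frame,
 * r12 = saved NIP; everything is restored straight from the frame
 * and we RFI back to the interrupted code.
 */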
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4			/* restart at exc_exit_restart */
	b	2b

	.comm	fee_restarts,4

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	sigreturn_exit
sigreturn_exit:
	subi	r1,r3,STACK_FRAME_OVERHEAD
	rlwinm	r12,r1,0,0,18		/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	bnel-	do_syscall_trace
	/* fall through */

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,18
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)			/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
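/*
 * Critical interrupts on 4xx/Book-E save their state in CSRR0/CSRR1
 * and are dismissed with rfci, so the code below reloads those SPRs,
 * and also puts back DEAR and ESR, which the critical interrupt may
 * have overwritten while the interrupted code still needed them.
 */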
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	REST_NVGPRS(r1)
	lwz	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	bne	user_exc_return

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
#ifdef CONFIG_40x
	/* Avoid any possible TLB misses here by turning off MSR:DR; we
	 * assume the instructions here are mapped by a pinned TLB entry. */
	li	r10,MSR_IR
	mtmsr	r10
	isync
	tophys(r1, r1)
#endif
	lwz	r9,_DEAR(r1)
	lwz	r10,_ESR(r1)
	mtspr	SPRN_DEAR,r9
	mtspr	SPRN_ESR,r10
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_CSRR0,r11
	mtspr	SPRN_CSRR1,r12
	lwz	r9,GPR9(r1)
	lwz	r12,GPR12(r1)
	lwz	r10,GPR10(r1)
	lwz	r11,GPR11(r1)
	lwz	r1,GPR1(r1)
	PPC405_ERR77_SYNC
	rfci
	b	.			/* prevent prefetch past rfci */

#ifdef CONFIG_BOOKE
/*
 * Return from a machine check interrupt, similar to a critical
 * interrupt.
 */
	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	REST_NVGPRS(r1)
	lwz	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	bne	user_exc_return

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	stwcx.	r0,0,r1			/* to clear the reservation */

	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	lwz	r9,_DEAR(r1)
	lwz	r10,_ESR(r1)
	mtspr	SPRN_DEAR,r9
	mtspr	SPRN_ESR,r10
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_MCSRR0,r11
	mtspr	SPRN_MCSRR1,r12
	lwz	r9,GPR9(r1)
	lwz	r12,GPR12(r1)
	lwz	r10,GPR10(r1)
	lwz	r11,GPR11(r1)
	lwz	r1,GPR1(r1)
	RFMCI
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
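/*
 * global_dbcr0 is laid out as two words: word 0 holds the saved
 * global DBCR0 value and word 1 a counter, incremented here when the
 * task's value is installed and decremented in transfer_to_handler
 * when the saved value is loaded back into DBCR0.
 */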
load_dbcr0:
	mfmsr	r10			/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11		/* clear all pending debug events */
	blr

	.comm	global_dbcr0,8
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
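/*
 * If the saved NIP (r12) falls inside [exc_exit_restart,
 * exc_exit_restart_end) we can simply restart the exit sequence from
 * exc_exit_restart; the ee_restarts counter below records how often
 * that has happened.
 */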
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.comm	ee_restarts,4

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_OF
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
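/*
 * Sketch of what the code below does (who fills in r3 is not shown
 * here; that is up to the RTAS call wrappers): the current MSR is
 * saved on the stack, interrupts are disabled, the physical stack
 * pointer is stashed in SPRG2 (presumably so the machine check path,
 * see machine_check_in_rtas below, can tell that RTAS was running),
 * and we RFI to rtas_entry with translation off and a physical
 * return address at 1: below, where the original MSR and stack are
 * restored.
 */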
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	lis	r4,rtas_data@ha
	lwz	r4,rtas_data@l(r4)
	lis	r6,1f@ha		/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lis	r8,rtas_entry@ha
	lwz	r8,rtas_entry@l(r8)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	CLR_TOP32(r7)
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_OF */