/*
 * arch/ppc64/kernel/entry.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * System calls.
 */
	.section ".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc ID_72656773_68657265[TC],0x7265677368657265
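	/* 0x7265677368657265 is the ASCII string "regshere"; the TC
	 * symbol name above encodes the same bytes in its digits. */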

	.section ".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
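	/*
	 * Entry conventions (a reading of the stores below, not part of
	 * the original comments):
	 *   r0       system call number
	 *   r3-r8    system call arguments
	 *   r9       the caller's r13 (r13 itself already holds the PACA)
	 *   r11,r12  SRR0 and SRR1, i.e. the caller's NIP and MSR
	 * r9-r12 are volatile and were clobbered by the first-level
	 * handler, so zeros are stored in their GPR save slots.
	 */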
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	crclr	so
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	beq	hardware_interrupt_entry
	lbz	r10,PACAPROCENABLED(r13)
	std	r10,SOFTE(r1)
#endif
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
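/*
 * Layout note (inferred from the code below, not original commentary):
 * each sys_call_table entry is 16 bytes, a 64-bit handler pointer
 * followed by the 32-bit one, hence the "slwi r0,r0,4" scaling and the
 * "+8" offset for 32-bit tasks.  Roughly, as a sketch:
 *	struct { void *sys64; void *sys32; } sys_call_table[NR_syscalls];
 */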
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
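	/* The mfmsr/rldicl/rotldi/mtmsrd sequence below clears only
	 * MSR_EE: rotating left by 48 brings the EE bit to the top,
	 * rldicl's mask drops it, and the rotate by 16 restores the
	 * original bit positions. */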
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	beq-	1f			/* only restore r13 if */
	ld	r13,GPR13(r1)		/* returning to usermode */
1:	ld	r2,GPR2(r1)
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont
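
/*
 * Note: the PowerPC syscall ABI reports errors by setting the CR0.SO
 * bit (the 0x1000 OR'ed into the saved CR above) and returning the
 * positive errno value in r3, rather than returning a negative value
 * as some other architectures do.
 */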
/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set. */

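	/* The bits are cleared with an ldarx/stdcx. read-modify-write
	 * loop so that concurrent TI_FLAGS updates (e.g. from an
	 * interrupt) are not lost between the load and the store. */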
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

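/*
 * Convention note: the low bit of the _TRAP word in the frame flags
 * whether the non-volatile GPRs still need saving.  save_nvgprs
 * returns immediately when the bit is already clear, and clears it
 * after saving, so repeated calls are cheap and idempotent.
 */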
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0		/* the child returns 0 from fork */
	b	syscall_exit

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc64/kernel/process.c
 */
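/*
 * Because _switch is reached via an ordinary function call, only the
 * ABI non-volatile state (r14-r31, CR, LR) needs to be saved here;
 * the volatile registers are dead across the call by definition,
 * which is why the frame below saves so little.
 */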
	.align 7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0,
	   so the signal test only fires when returning to user mode */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f	/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10	/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */

4:	stb	r5,PACAPROCENABLED(r13)
#endif

	ld	r3,_MSR(r1)
	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	andi.	r0,r3,MSR_PR

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	beq	1f
	REST_GPR(13, r1)
1:
	ld	r3,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	ld	r3,_XER(r1)
	mtspr	SPRN_XER,r3

	REST_8GPRS(5, r1)

	stdcx.	r0,0,r1		/* to clear the reservation */

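	/*
	 * Clear MSR_RI before loading SRR0/SRR1: once they hold the
	 * return state, taking another exception would clobber them,
	 * so RI=0 marks this window as unrecoverable.
	 */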
	mfmsr	r0
	li	r2, MSR_RI
	andc	r0,r0,r2
	mtmsrd	r0,1

	ld	r0,_MSR(r1)
	mtspr	SPRN_SRR1,r0

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
#else
	andi.	r0,r3,MSR_EE
#endif
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_PPC_ISERIES
	li	r0,1
	stb	r0,PACAPROCENABLED(r13)
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
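/*
 * Flow (a summary inferred from the code, not original commentary):
 * everything RTAS may clobber is saved on the stack; r1 and the MSR
 * go in the PACA, since RTAS clobbers those too.  A real-mode 32-bit
 * MSR is built by hand and we rfid into the RTAS entry point; RTAS
 * returns to rtas_return_loc, which runs unrelocated until the saved
 * MSR and stack are restored.
 */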
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	mfmsr	r6
	andi.	r0,r6,MSR_EE
1:	tdnei	r0,0
.section __bug_table,"a"
	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
.previous
.section .rodata,"a"
1:	.asciz	__FILE__
2:	.asciz	"enter_rtas"
.previous

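	/* The 0x1000000 added to __LINE__ above is, to our reading,
	 * BUG_WARNING_TRAP: it marks the bug-table entry as a warning,
	 * so the trap handler prints and continues rather than
	 * treating the trap as a fatal BUG. */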
	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2		/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
	sync			/* disable interrupts so SRR0/1 */
	mtmsrd	r0		/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3	/* Get PACA */
	clrldi	r4,r4,2		/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)	/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_PPC_MULTIPLATFORM

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch MSR to 32 bits mode */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync
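	/* With MSR_SF and MSR_ISF cleared, the processor executes (and
	 * takes exceptions) in 32-bit mode, presumably because the
	 * 32-bit Open Firmware client interface expects it. */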

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#endif /* CONFIG_PPC_MULTIPLATFORM */