/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * System calls.
 */
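/*
 * Calling convention (standard 64-bit PowerPC Linux): the syscall number
 * arrives in r0 and up to six arguments in r3-r8; userspace executes the
 * "sc" instruction, which traps to system_call_common below.  The result
 * comes back in r3, with CR0.SO set when it is an error code.  A minimal,
 * hypothetical user-side sequence, for illustration only:
 *
 *	li	r0,NR_exit	# syscall number in r0 (NR_exit is illustrative)
 *	li	r3,0		# first argument in r3
 *	sc			# enter the kernel
 */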
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
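/*
 * Note: the constant is simply the ASCII string "regshere"
 * (0x72 'r', 0x65 'e', 0x67 'g', 0x73 's', ...), so the marker is easy
 * to spot when inspecting raw stack dumps.
 */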

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	crclr	so
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	beq	hardware_interrupt_entry
	lbz	r10,PACAPROCENABLED(r13)
	std	r10,SOFTE(r1)
#endif
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
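/*
 * The table interleaves 64-bit and 32-bit handlers: each syscall slot
 * is 16 bytes wide, with the native (64-bit) entry at offset 0 and the
 * compat (32-bit) entry at offset 8.  That is why the index is shifted
 * left by 4 below, and why the 32-bit path adds 8 to the table base.
 */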
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
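	/* The rldicl/rotldi pair below clears MSR_EE without needing a
	   separate mask register: rotating left by 48 brings the EE bit to
	   the top, rldicl's mask clears that top bit, and rotating by
	   another 16 puts everything back (48 + 16 = 64). */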
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1		/* clear MSR.RI */
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

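	/* Atomic read-modify-write on thread_info->flags: ldarx sets a
	   reservation, andc clears the per-syscall bits, and stdcx. only
	   succeeds if nothing else touched the word in between; on failure
	   (CR0.EQ clear) we simply retry at 3:. */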
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
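/*
 * Only the non-volatile state needs saving here: r14-r31, the CR and
 * the return NIP go into a SWITCH_FRAME_SIZE frame built on the old
 * stack just below, while r3-r13 are caller-saved under the C ABI.
 * On return, r3 holds the task_struct of the previously running task.
 */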
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
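	/* (The 288 bytes are the protected zone the 64-bit PowerPC ELF
	   ABI lets a function use below its stack pointer without
	   allocating a frame; it matters for frames that asynchronous
	   interrupts may land on, not for this synchronous switch point.) */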
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f			/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */

4:	stb	r5,PACAPROCENABLED(r13)
#endif

	ld	r3,_MSR(r1)
	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	andi.	r0,r3,MSR_PR

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
	REST_GPR(13, r1)
1:
	ld	r3,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	ld	r3,_XER(r1)
	mtspr	SPRN_XER,r3

	REST_8GPRS(5, r1)

	stdcx.	r0,0,r1		/* to clear the reservation */

	mfmsr	r0
	li	r2, MSR_RI
	andc	r0,r0,r2
	mtmsrd	r0,1

	ld	r0,_MSR(r1)
	mtspr	SPRN_SRR1,r0

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
#else
	andi.	r0,r3,MSR_EE
#endif
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_PPC_ISERIES
	li	r0,1
	stb	r0,PACAPROCENABLED(r13)
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
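/*
 * (Background, not from this file: RTAS is the firmware service layer
 * on CHRP/pSeries machines; r3 on entry is assumed here to carry the
 * real address of the rtas argument block that the firmware call will
 * consume, which is why the code below is careful never to clobber it.)
 */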
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
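	/* The tdnei below traps if r0 (the MSR_EE bit) is non-zero; the
	   __bug_table entry records the trap address plus file/function
	   strings, and (assuming this kernel follows the usual convention
	   of the era) the 0x1000000 added to __LINE__ marks the entry as
	   a warning rather than a fatal BUG. */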
	mfmsr	r6
	andi.	r0,r6,MSR_EE
1:	tdnei	r0,0
	.section __bug_table,"a"
	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
	.previous
	.section .rodata,"a"
1:	.asciz	__FILE__
2:	.asciz	"enter_rtas"
	.previous

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_PPC_MULTIPLATFORM

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch MSR to 32 bits mode
	 */
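	/* Clearing MSR_SF puts the CPU in 32-bit mode (effective addresses
	   are truncated to 32 bits), and MSR_ISF selects the mode used
	   when taking interrupts.  Both bits live in the high half of the
	   MSR, hence the rldicr mask construction below. */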
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that the top 32 bits of r1 didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#endif /* CONFIG_PPC_MULTIPLATFORM */