[PATCH] powerpc: Cleanup LOADADDR etc. asm macros
arch/powerpc/kernel/entry_64.S
/*
 * arch/ppc64/kernel/entry.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265	/* ASCII "regshere" */

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
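	/*
	 * Register usage on entry (as implied by the stores below):
	 *   r0      = syscall number, r3-r8 = syscall arguments,
	 *   r9      = the caller's saved r13, r13 = PACA,
	 *   r11/r12 = SRR0/SRR1 (the caller's NIP and MSR),
	 *   r1      = the caller's stack pointer,
	 *   r2      = the caller's TOC (replaced with PACATOC below).
	 */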
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	crclr	so		/* assume no syscall error: SO in CR0 is what userspace tests */
	mfcr	r9
	mflr	r10
	li	r11,0xc01	/* trap 0xc00 (system call); low bit set: non-volatile GPRs not saved */
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	beq	hardware_interrupt_entry
	lbz	r10,PACAPROCENABLED(r13)
	std	r10,SOFTE(r1)
#endif
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on the caller's run-mode / personality.
 */
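/*
 * Table layout assumed below: each sys_call_table slot is 16 bytes --
 * a 64-bit handler pointer followed by a 32-bit-task handler pointer --
 * hence the *16 scaling of the syscall number in r0 and the +8 offset
 * selected for 32-bit tasks.
 */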
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
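	/* Clear only MSR_EE: rotate the EE bit (bit 48) up to the MSB,
	   mask it off with rldicl, then rotate the MSR back into place. */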
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	beq-	1f			/* only restore r13 if */
	ld	r13,GPR13(r1)		/* returning to usermode */
1:	ld	r2,GPR2(r1)
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	bne-	2f
	cmpld	r3,r11		/* r11 holds -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set, but _leave_
	   _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
	   yet. */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
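	/* Atomically clear the per-syscall flags in thread_info->flags
	   with a ldarx/stdcx. retry loop (r12 points at the flags word). */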
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	bl	.save_nvgprs
	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r9,_TIF_SAVE_NVGPRS
	bne	save_user_nvgprs

	/* If tracing or single-stepping, call the syscall-trace-leave hook */
save_user_nvgprs_cont:
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	5f

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	REST_NVGPRS(r1)
	clrrdi	r12,r1,THREAD_SHIFT

	/* Disable interrupts again and handle other work if any */
5:	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1

	b	.ret_from_except_lite

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1	/* low bit of _TRAP set => NVGPRs not yet saved */
	beqlr-			/* already saved: nothing to do */
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1	/* clear the low bit to mark them saved */
	std	r0,_TRAP(r1)
	blr


save_user_nvgprs:
	ld	r10,TI_SIGFRAME(r12)
	andi.	r0,r9,_TIF_32BIT
	beq-	save_user_nvgprs_64

	/* 32-bit save to userspace */

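/*
 * The macros below are recursive: each expansion stores one register
 * and emits an __ex_table fixup entry sending access faults to
 * save_user_nvgprs_fault, then recurses with \start+1 until \end.
 */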
.macro savewords start, end
  1:	stw \start,4*(\start)(r10)
	.section __ex_table,"a"
	.align	3
	.llong	1b,save_user_nvgprs_fault
	.previous
	.if \end - \start
	savewords "(\start+1)",\end
	.endif
.endm
	savewords 14,31
	b	save_user_nvgprs_cont

save_user_nvgprs_64:
	/* 64-bit save to userspace */

.macro savelongs start, end
  1:	std \start,8*(\start)(r10)
	.section __ex_table,"a"
	.align	3
	.llong	1b,save_user_nvgprs_fault
	.previous
	.if \end - \start
	savelongs "(\start+1)",\end
	.endif
.endm
	savelongs 14,31
	b	save_user_nvgprs_cont

save_user_nvgprs_fault:
	li	r3,11		/* SIGSEGV */
	ld	r4,TI_TASK(r12)
	bl	.force_sigsegv

	clrrdi	r12,r1,THREAD_SHIFT
	ld	r9,TI_FLAGS(r12)
	b	save_user_nvgprs_cont

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc64/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACA+LPPACAANYINT(r13)
	cmpdi	r3,0
	beq+	4f	/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10	/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */

4:	stb	r5,PACAPROCENABLED(r13)
#endif

	ld	r3,_MSR(r1)
	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	andi.	r0,r3,MSR_PR

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	beq	1f
	REST_GPR(13, r1)
1:
	ld	r3,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	ld	r3,_XER(r1)
	mtspr	SPRN_XER,r3

	REST_8GPRS(5, r1)

	stdcx.	r0,0,r1		/* to clear the reservation */

	mfmsr	r0
	li	r2, MSR_RI
	andc	r0,r0,r2
	mtmsrd	r0,1

	ld	r0,_MSR(r1)
	mtspr	SPRN_SRR1,r0

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
#else
	andi.	r0,r3,MSR_EE
#endif
	crandc	eq,4*cr1+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_PPC_ISERIES
	li	r0,1
	stb	r0,PACAPROCENABLED(r13)
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* It is never acceptable to get here with interrupts enabled;
	 * check it with the asm equivalent of WARN_ON.
	 */
	mfmsr	r6
	andi.	r0,r6,MSR_EE
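	/* tdnei traps if r0 (the extracted MSR_EE bit) is non-zero; the
	   __bug_table entry below maps the trap address to file/line, and
	   the 0x1000000 flag marks it as a warning rather than a BUG. */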
1:	tdnei	r0,0
.section __bug_table,"a"
	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
.previous
.section .rodata,"a"
1:	.asciz	__FILE__
2:	.asciz	"enter_rtas"
.previous

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

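	/* r0 now holds the current MSR with EE/SE/BE/RI cleared, used while
	   we switch. Next, build the MSR RTAS will run with: clear SF
	   (64-bit mode), IR/DR (relocation) and the FP bits, keep RI set. */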
	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3	/* Get PACA */
	clrldi	r4,r4,2		/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)	/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_PPC_MULTIPLATFORM

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch the MSR to 32-bit mode */
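	/* Clear MSR_SF (64-bit mode) and MSR_ISF (64-bit interrupt mode);
	   each mask is built with li/rldicr since these bits live in the
	   upper half of the MSR. */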
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#endif /* CONFIG_PPC_MULTIPLATFORM */